diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index c1513c756af125ce5adb76ee8e4522e25572a099..e757ac8142d576f241362b2912d6296c603aba39 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -98,3 +98,13 @@ Description: The backing_dev file is read-write and set up backing device for zram to write incompressible pages. For using, user should enable CONFIG_ZRAM_WRITEBACK. + +What: /sys/block/zram/use_dedup +Date: March 2017 +Contact: Joonsoo Kim +Description: + The use_dedup file is read-write and specifies deduplication + feature is used or not. If enabled, duplicated data is + managed by reference count and will not be stored in memory + twice. Benefit of this feature largely depends on the workload + so pay attention when using it. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 8355e79350b79cf8789719d20e48f34497fee93b..6cae60929cb6f4446e73b5104aaf356a4c4bf3a0 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -379,6 +379,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/l1tf Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities @@ -390,3 +391,26 @@ Description: Information about CPU vulnerabilities "Not affected" CPU is not affected by the vulnerability "Vulnerable" CPU is affected and no mitigation in effect "Mitigation: $M" CPU is affected and mitigation $M is in effect + + Details about the l1tf file can be found in + Documentation/admin-guide/l1tf.rst + +What: /sys/devices/system/cpu/smt + /sys/devices/system/cpu/smt/active + /sys/devices/system/cpu/smt/control 
+Date: June 2018 +Contact: Linux kernel mailing list +Description: Control Symmetric Multi Threading (SMT) + + active: Tells whether SMT is active (enabled and siblings online) + + control: Read/write interface to control SMT. Possible + values: + + "on" SMT is enabled + "off" SMT is disabled + "forceoff" SMT is force disabled. Cannot be changed. + "notsupported" SMT is not supported by the CPU + + If control status is "forceoff" or "notsupported" writes + are rejected. diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 372b88f4e706250123ef30b8a851b8c8bd02b8d2..41b8cb3b4525669e611f1981485a863df6649369 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -51,6 +51,14 @@ Description: Controls the dirty page count condition for the in-place-update policies. +What: /sys/fs/f2fs//min_seq_blocks +Date: August 2018 +Contact: "Jaegeuk Kim" +Description: + Controls the dirty page count condition for batched sequential + writes in ->writepages. + + What: /sys/fs/f2fs//min_hot_blocks Date: March 2017 Contact: "Jaegeuk Kim" diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst index 5bb9161dbe6a31be54992d412945289ce3a57587..78f8f00c369f22795016f875a3ace7db9c48950f 100644 --- a/Documentation/admin-guide/index.rst +++ b/Documentation/admin-guide/index.rst @@ -17,6 +17,15 @@ etc. kernel-parameters devices +This section describes CPU vulnerabilities and provides an overview of the +possible mitigations along with guidance for selecting mitigations if they +are configurable at compile, boot or run time. + +.. toctree:: + :maxdepth: 1 + + l1tf + Here is a set of documents aimed at users who are trying to track down problems and bugs in particular. 
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0613573ed177d8eb7c7dc35c4c09fd89f6338462..6af44985a48d625f9af4128ba65d01a633f50229 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1897,10 +1897,84 @@ (virtualized real and unpaged mode) on capable Intel chips. Default is 1 (enabled) + kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault + CVE-2018-3620. + + Valid arguments: never, cond, always + + always: L1D cache flush on every VMENTER. + cond: Flush L1D on VMENTER only when the code between + VMEXIT and VMENTER can leak host memory. + never: Disables the mitigation + + Default is cond (do L1 cache flush in specific instances) + kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification feature (tagged TLBs) on capable Intel chips. Default is 1 (enabled) + l1tf= [X86] Control mitigation of the L1TF vulnerability on + affected CPUs + + The kernel PTE inversion protection is unconditionally + enabled and cannot be disabled. + + full + Provides all available mitigations for the + L1TF vulnerability. Disables SMT and + enables all mitigations in the + hypervisors, i.e. unconditional L1D flush. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + full,force + Same as 'full', but disables SMT and L1D + flush runtime control. Implies the + 'nosmt=force' command line option. + (i.e. sysfs control of SMT is disabled.) + + flush + Leaves SMT enabled and enables the default + hypervisor mitigation, i.e. conditional + L1D flush. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. 
Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + flush,nosmt + + Disables SMT and enables the default + hypervisor mitigation. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + flush,nowarn + Same as 'flush', but hypervisors will not + warn when a VM is started in a potentially + insecure configuration. + + off + Disables hypervisor mitigations and doesn't + emit any warnings. + + Default is 'flush'. + + For details see: Documentation/admin-guide/l1tf.rst + l2cr= [PPC] l3cr= [PPC] @@ -2604,6 +2678,10 @@ nosmt [KNL,S390] Disable symmetric multithreading (SMT). Equivalent to smt=1. + [KNL,x86] Disable symmetric multithreading (SMT). + nosmt=force: Force disable SMT, cannot be undone + via the sysfs control file. + nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 (indirect branch prediction) vulnerability. System may allow data leaks with this option, which is equivalent diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst new file mode 100644 index 0000000000000000000000000000000000000000..bae52b845de0b93af644ea55103d5a912dfca753 --- /dev/null +++ b/Documentation/admin-guide/l1tf.rst @@ -0,0 +1,610 @@ +L1TF - L1 Terminal Fault +======================== + +L1 Terminal Fault is a hardware vulnerability which allows unprivileged +speculative access to data which is available in the Level 1 Data Cache +when the page table entry controlling the virtual address, which is used +for the access, has the Present bit cleared or other reserved bits set. + +Affected processors +------------------- + +This vulnerability affects a wide range of Intel processors. 
The +vulnerability is not present on: + + - Processors from AMD, Centaur and other non Intel vendors + + - Older processor models, where the CPU family is < 6 + + - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft, + Penwell, Pineview, Silvermont, Airmont, Merrifield) + + - The Intel XEON PHI family + + - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the + IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected + by the Meltdown vulnerability either. These CPUs should become + available by end of 2018. + +Whether a processor is affected or not can be read out from the L1TF +vulnerability file in sysfs. See :ref:`l1tf_sys_info`. + +Related CVEs +------------ + +The following CVE entries are related to the L1TF vulnerability: + + ============= ================= ============================== + CVE-2018-3615 L1 Terminal Fault SGX related aspects + CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects + CVE-2018-3646 L1 Terminal Fault Virtualization related aspects + ============= ================= ============================== + +Problem +------- + +If an instruction accesses a virtual address for which the relevant page +table entry (PTE) has the Present bit cleared or other reserved bits set, +then speculative execution ignores the invalid PTE and loads the referenced +data if it is present in the Level 1 Data Cache, as if the page referenced +by the address bits in the PTE was still present and accessible. + +While this is a purely speculative mechanism and the instruction will raise +a page fault when it is retired eventually, the pure act of loading the +data and making it available to other speculative instructions opens up the +opportunity for side channel attacks to unprivileged malicious code, +similar to the Meltdown attack. + +While Meltdown breaks the user space to kernel space protection, L1TF +allows to attack any physical memory address in the system and the attack +works across all protection domains. 
It allows an attack of SGX and also +works from inside virtual machines because the speculation bypasses the +extended page table (EPT) protection mechanism. + + +Attack scenarios +---------------- + +1. Malicious user space +^^^^^^^^^^^^^^^^^^^^^^^ + + Operating Systems store arbitrary information in the address bits of a + PTE which is marked non present. This allows a malicious user space + application to attack the physical memory to which these PTEs resolve. + In some cases user-space can maliciously influence the information + encoded in the address bits of the PTE, thus making attacks more + deterministic and more practical. + + The Linux kernel contains a mitigation for this attack vector, PTE + inversion, which is permanently enabled and has no performance + impact. The kernel ensures that the address bits of PTEs, which are not + marked present, never point to cacheable physical memory space. + + A system with an up to date kernel is protected against attacks from + malicious user space applications. + +2. Malicious guest in a virtual machine +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The fact that L1TF breaks all domain protections allows malicious guest + OSes, which can control the PTEs directly, and malicious guest user + space applications, which run on an unprotected guest kernel lacking the + PTE inversion mitigation for L1TF, to attack physical host memory. + + A special aspect of L1TF in the context of virtualization is symmetric + multi threading (SMT). The Intel implementation of SMT is called + HyperThreading. The fact that Hyperthreads on the affected processors + share the L1 Data Cache (L1D) is important for this. As the flaw allows + only to attack data which is present in L1D, a malicious guest running + on one Hyperthread can attack the data which is brought into the L1D by + the context which runs on the sibling Hyperthread of the same physical + core. This context can be host OS, host user space or a different guest. 
+ + If the processor does not support Extended Page Tables, the attack is + only possible, when the hypervisor does not sanitize the content of the + effective (shadow) page tables. + + While solutions exist to mitigate these attack vectors fully, these + mitigations are not enabled by default in the Linux kernel because they + can affect performance significantly. The kernel provides several + mechanisms which can be utilized to address the problem depending on the + deployment scenario. The mitigations, their protection scope and impact + are described in the next sections. + + The default mitigations and the rationale for choosing them are explained + at the end of this document. See :ref:`default_mitigations`. + +.. _l1tf_sys_info: + +L1TF system information +----------------------- + +The Linux kernel provides a sysfs interface to enumerate the current L1TF +status of the system: whether the system is vulnerable, and which +mitigations are active. The relevant sysfs file is: + +/sys/devices/system/cpu/vulnerabilities/l1tf + +The possible values in this file are: + + =========================== =============================== + 'Not affected' The processor is not vulnerable + 'Mitigation: PTE Inversion' The host protection is active + =========================== =============================== + +If KVM/VMX is enabled and the processor is vulnerable then the following +information is appended to the 'Mitigation: PTE Inversion' part: + + - SMT status: + + ===================== ================ + 'VMX: SMT vulnerable' SMT is enabled + 'VMX: SMT disabled' SMT is disabled + ===================== ================ + + - L1D Flush mode: + + ================================ ==================================== + 'L1D vulnerable' L1D flushing is disabled + + 'L1D conditional cache flushes' L1D flush is conditionally enabled + + 'L1D cache flushes' L1D flush is unconditionally enabled + ================================ ==================================== + +The 
resulting grade of protection is discussed in the following sections. + + +Host mitigation mechanism +------------------------- + +The kernel is unconditionally protected against L1TF attacks from malicious +user space running on the host. + + +Guest mitigation mechanisms +--------------------------- + +.. _l1d_flush: + +1. L1D flush on VMENTER +^^^^^^^^^^^^^^^^^^^^^^^ + + To make sure that a guest cannot attack data which is present in the L1D + the hypervisor flushes the L1D before entering the guest. + + Flushing the L1D evicts not only the data which should not be accessed + by a potentially malicious guest, it also flushes the guest + data. Flushing the L1D has a performance impact as the processor has to + bring the flushed guest data back into the L1D. Depending on the + frequency of VMEXIT/VMENTER and the type of computations in the guest + performance degradation in the range of 1% to 50% has been observed. For + scenarios where guest VMEXIT/VMENTER are rare the performance impact is + minimal. Virtio and mechanisms like posted interrupts are designed to + confine the VMEXITs to a bare minimum, but specific configurations and + application scenarios might still suffer from a high VMEXIT rate. + + The kernel provides two L1D flush modes: + - conditional ('cond') + - unconditional ('always') + + The conditional mode avoids L1D flushing after VMEXITs which execute + only audited code paths before the corresponding VMENTER. These code + paths have been verified that they cannot expose secrets or other + interesting data to an attacker, but they can leak information about the + address space layout of the hypervisor. + + Unconditional mode flushes L1D on all VMENTER invocations and provides + maximum protection. It has a higher overhead than the conditional + mode. The overhead cannot be quantified correctly as it depends on the + workload scenario and the resulting number of VMEXITs. + + The general recommendation is to enable L1D flush on VMENTER. 
The kernel + defaults to conditional mode on affected processors. + + **Note**, that L1D flush does not prevent the SMT problem because the + sibling thread will also bring back its data into the L1D which makes it + attackable again. + + L1D flush can be controlled by the administrator via the kernel command + line and sysfs control files. See :ref:`mitigation_control_command_line` + and :ref:`mitigation_control_kvm`. + +.. _guest_confinement: + +2. Guest VCPU confinement to dedicated physical cores +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + To address the SMT problem, it is possible to make a guest or a group of + guests affine to one or more physical cores. The proper mechanism for + that is to utilize exclusive cpusets to ensure that no other guest or + host tasks can run on these cores. + + If only a single guest or related guests run on sibling SMT threads on + the same physical core then they can only attack their own memory and + restricted parts of the host memory. + + Host memory is attackable, when one of the sibling SMT threads runs in + host OS (hypervisor) context and the other in guest context. The amount + of valuable information from the host OS context depends on the context + which the host OS executes, i.e. interrupts, soft interrupts and kernel + threads. The amount of valuable data from these contexts cannot be + declared as non-interesting for an attacker without deep inspection of + the code. + + **Note**, that assigning guests to a fixed set of physical cores affects + the ability of the scheduler to do load balancing and might have + negative effects on CPU utilization depending on the hosting + scenario. Disabling SMT might be a viable alternative for particular + scenarios. + + For further information about confining guests to a single or to a group + of cores consult the cpusets documentation: + + https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt + +.. _interrupt_isolation: + +3. 
Interrupt affinity +^^^^^^^^^^^^^^^^^^^^^ + + Interrupts can be made affine to logical CPUs. This is not universally + true because there are types of interrupts which are truly per CPU + interrupts, e.g. the local timer interrupt. Aside of that multi queue + devices affine their interrupts to single CPUs or groups of CPUs per + queue without allowing the administrator to control the affinities. + + Moving the interrupts, which can be affinity controlled, away from CPUs + which run untrusted guests, reduces the attack vector space. + + Whether the interrupts which are affine to CPUs, which run untrusted + guests, provide interesting data for an attacker depends on the system + configuration and the scenarios which run on the system. While for some + of the interrupts it can be assumed that they won't expose interesting + information beyond exposing hints about the host OS memory layout, there + is no way to make general assumptions. + + Interrupt affinity can be controlled by the administrator via the + /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is + available at: + + https://www.kernel.org/doc/Documentation/IRQ-affinity.txt + +.. _smt_control: + +4. SMT control +^^^^^^^^^^^^^^ + + To prevent the SMT issues of L1TF it might be necessary to disable SMT + completely. Disabling SMT can have a significant performance impact, but + the impact depends on the hosting scenario and the type of workloads. + The impact of disabling SMT needs also to be weighed against the impact + of other mitigation solutions like confining guests to dedicated cores. + + The kernel provides a sysfs interface to retrieve the status of SMT and + to control it. It also provides a kernel command line interface to + control SMT. + + The kernel command line interface consists of the following options: + + =========== ========================================================== + nosmt Affects the bring up of the secondary CPUs during boot. 
The + kernel tries to bring all present CPUs online during the + boot process. "nosmt" makes sure that from each physical + core only one - the so called primary (hyper) thread is + activated. Due to a design flaw of Intel processors related + to Machine Check Exceptions the non primary siblings have + to be brought up at least partially and are then shut down + again. "nosmt" can be undone via the sysfs interface. + + nosmt=force Has the same effect as "nosmt" but it does not allow to + undo the SMT disable via the sysfs interface. + =========== ========================================================== + + The sysfs interface provides two files: + + - /sys/devices/system/cpu/smt/control + - /sys/devices/system/cpu/smt/active + + /sys/devices/system/cpu/smt/control: + + This file allows to read out the SMT control state and provides the + ability to disable or (re)enable SMT. The possible states are: + + ============== =================================================== + on SMT is supported by the CPU and enabled. All + logical CPUs can be onlined and offlined without + restrictions. + + off SMT is supported by the CPU and disabled. Only + the so called primary SMT threads can be onlined + and offlined without restrictions. An attempt to + online a non-primary sibling is rejected + + forceoff Same as 'off' but the state cannot be controlled. + Attempts to write to the control file are rejected. + + notsupported The processor does not support SMT. It's therefore + not affected by the SMT implications of L1TF. + Attempts to write to the control file are rejected. + ============== =================================================== + + The possible states which can be written into this file to control SMT + state are: + + - on + - off + - forceoff + + /sys/devices/system/cpu/smt/active: + + This file reports whether SMT is enabled and active, i.e. if on any + physical core two or more sibling threads are online. 
+ + SMT control is also possible at boot time via the l1tf kernel command + line parameter in combination with L1D flush control. See + :ref:`mitigation_control_command_line`. + +5. Disabling EPT +^^^^^^^^^^^^^^^^ + + Disabling EPT for virtual machines provides full mitigation for L1TF even + with SMT enabled, because the effective page tables for guests are + managed and sanitized by the hypervisor. Though disabling EPT has a + significant performance impact especially when the Meltdown mitigation + KPTI is enabled. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. + +There is ongoing research and development for new mitigation mechanisms to +address the performance impact of disabling SMT or EPT. + +.. _mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +The kernel command line allows to control the L1TF mitigations at boot +time with the option "l1tf=". The valid arguments for this option are: + + ============ ============================================================= + full Provides all available mitigations for the L1TF + vulnerability. Disables SMT and enables all mitigations in + the hypervisors, i.e. unconditional L1D flushing + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. SMT enabled or L1D flush + disabled. + + full,force Same as 'full', but disables SMT and L1D flush runtime + control. Implies the 'nosmt=force' command line option. + (i.e. sysfs control of SMT is disabled.) + + flush Leaves SMT enabled and enables the default hypervisor + mitigation, i.e. conditional L1D flushing + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. 
SMT enabled or L1D flush + disabled. + + flush,nosmt Disables SMT and enables the default hypervisor mitigation, + i.e. conditional L1D flushing. + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. SMT enabled or L1D flush + disabled. + + flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is + started in a potentially insecure configuration. + + off Disables hypervisor mitigations and doesn't emit any + warnings. + ============ ============================================================= + +The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`. + + +.. _mitigation_control_kvm: + +Mitigation control for KVM - module parameter +------------------------------------------------------------- + +The KVM hypervisor mitigation mechanism, flushing the L1D cache when +entering a guest, can be controlled with a module parameter. + +The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the +following arguments: + + ============ ============================================================== + always L1D cache flush on every VMENTER. + + cond Flush L1D on VMENTER only when the code between VMEXIT and + VMENTER can leak host memory which is considered + interesting for an attacker. This still can leak host memory + which allows e.g. to determine the hosts address space layout. + + never Disables the mitigation + ============ ============================================================== + +The parameter can be provided on the kernel command line, as a module +parameter when loading the modules and at runtime modified via the sysfs +file: + +/sys/module/kvm_intel/parameters/vmentry_l1d_flush + +The default is 'cond'. 
If 'l1tf=full,force' is given on the kernel command +line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush +module parameter is ignored and writes to the sysfs file are rejected. + + +Mitigation selection guide +-------------------------- + +1. No virtualization in use +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The system is protected by the kernel unconditionally and no further + action is required. + +2. Virtualization with trusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If the guest comes from a trusted source and the guest OS kernel is + guaranteed to have the L1TF mitigations in place the system is fully + protected against L1TF and no further action is required. + + To avoid the overhead of the default L1D flushing on VMENTER the + administrator can disable the flushing via the kernel command line and + sysfs control files. See :ref:`mitigation_control_command_line` and + :ref:`mitigation_control_kvm`. + + +3. Virtualization with untrusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +3.1. SMT not supported or disabled +"""""""""""""""""""""""""""""""""" + + If SMT is not supported by the processor or disabled in the BIOS or by + the kernel, it's only required to enforce L1D flushing on VMENTER. + + Conditional L1D flushing is the default behaviour and can be tuned. See + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. + +3.2. EPT not supported or disabled +"""""""""""""""""""""""""""""""""" + + If EPT is not supported by the processor or disabled in the hypervisor, + the system is fully protected. SMT can stay enabled and L1D flushing on + VMENTER is not required. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. + +3.3. 
SMT and EPT supported and active +""""""""""""""""""""""""""""""""""""" + + If SMT and EPT are supported and active then various degrees of + mitigations can be employed: + + - L1D flushing on VMENTER: + + L1D flushing on VMENTER is the minimal protection requirement, but it + is only potent in combination with other mitigation methods. + + Conditional L1D flushing is the default behaviour and can be tuned. See + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. + + - Guest confinement: + + Confinement of guests to a single or a group of physical cores which + are not running any other processes, can reduce the attack surface + significantly, but interrupts, soft interrupts and kernel threads can + still expose valuable data to a potential attacker. See + :ref:`guest_confinement`. + + - Interrupt isolation: + + Isolating the guest CPUs from interrupts can reduce the attack surface + further, but still allows a malicious guest to explore a limited amount + of host physical memory. This can at least be used to gain knowledge + about the host address space layout. The interrupts which have a fixed + affinity to the CPUs which run the untrusted guests can depending on + the scenario still trigger soft interrupts and schedule kernel threads + which might expose valuable information. See + :ref:`interrupt_isolation`. + +The above three mitigation methods combined can provide protection to a +certain degree, but the risk of the remaining attack surface has to be +carefully analyzed. For full protection the following methods are +available: + + - Disabling SMT: + + Disabling SMT and enforcing the L1D flushing provides the maximum + amount of protection. This mitigation is not depending on any of the + above mitigation methods. + + SMT control and L1D flushing can be tuned by the command line + parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run + time with the matching sysfs control files. 
See :ref:`smt_control`, + :ref:`mitigation_control_command_line` and + :ref:`mitigation_control_kvm`. + + - Disabling EPT: + + Disabling EPT provides the maximum amount of protection as well. It is + not depending on any of the above mitigation methods. SMT can stay + enabled and L1D flushing is not required, but the performance impact is + significant. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' + parameter. + +3.4. Nested virtual machines +"""""""""""""""""""""""""""" + +When nested virtualization is in use, three operating systems are involved: +the bare metal hypervisor, the nested hypervisor and the nested virtual +machine. VMENTER operations from the nested hypervisor into the nested +guest will always be processed by the bare metal hypervisor. If KVM is the +bare metal hypervisor it will: + + - Flush the L1D cache on every switch from the nested hypervisor to the + nested virtual machine, so that the nested hypervisor's secrets are not + exposed to the nested virtual machine; + + - Flush the L1D cache on every switch from the nested virtual machine to + the nested hypervisor; this is a complex operation, and flushing the L1D + cache avoids that the bare metal hypervisor's secrets are exposed to the + nested virtual machine; + + - Instruct the nested hypervisor to not perform any L1D cache flush. This + is an optimization to avoid double L1D flushing. + + +.. _default_mitigations: + +Default mitigations +------------------- + + The kernel default mitigations for vulnerable processors are: + + - PTE inversion to protect against malicious user space. This is done + unconditionally and cannot be controlled. + + - L1D conditional flushing on VMENTER when EPT is enabled for + a guest. + + The kernel does not by default enforce the disabling of SMT, which leaves + SMT systems vulnerable when running untrusted guests with EPT enabled. 
+ + The rationale for this choice is: + + - Force disabling SMT can break existing setups, especially with + unattended updates. + + - If regular users run untrusted guests on their machine, then L1TF is + just an add on to other malware which might be embedded in an untrusted + guest, e.g. spam-bots or attacks on the local network. + + There is no technical way to prevent a user from running untrusted code + on their machines blindly. + + - It's technically extremely unlikely and from today's knowledge even + impossible that L1TF can be exploited via the most popular attack + mechanisms like JavaScript because these mechanisms have no way to + control PTEs. If this would be possible and not other mitigation would + be possible, then the default might be different. + + - The administrators of cloud and hosting setups have to carefully + analyze the risk for their scenarios and make the appropriate + mitigation choices, which might even vary across their deployed + machines and also result in other changes of their overall setup. + There is no way for the kernel to provide a sensible default for this + kind of scenarios. diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 257e65714c6a216f3fce9ebce7398eb821f96176..b81c2d4a2167e4b36215fc0fe51fd3eadb0cfb2e 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -169,7 +169,7 @@ comp_algorithm RW show and change the compression algorithm compact WO trigger memory compaction debug_stat RO this file is used for zram debugging purposes backing_dev RW set up backend storage for zram to write out - +use_dedup RW show and set deduplication feature User space is advised to use the following files to read the device statistics. @@ -218,6 +218,8 @@ line of text and contains the following stats separated by whitespace: same_pages the number of same element filled pages written to this disk. No memory is allocated for such pages. 
pages_compacted the number of pages freed during compaction + dup_data_size deduplicated data size + meta_data_size the amount of metadata allocated for deduplication feature 9) Deactivate: swapoff /dev/zram0 diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt index e63d09b4c6da246630f1165f9573bd0de433a991..03b1efdd60f0f10f1fc0e4678bdd6f5bc4f17c9d 100644 --- a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt +++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt @@ -29,7 +29,13 @@ Optional properties for child nodes: - qcom,allocate-boot-time: Indicates whether clients needs boot time memory allocation. -Example: +- qcom,allocate-on-request: Indicates memory allocation happens only upon client request + +Note: qcom,allocate-boot-time and qcom,allocate-on-request are mutually exclusive right now. + +- qcom,guard-band: Indicates addition of a guard band memory allocation in addition to the client's memory region. 
+ +Example 1: qcom,memshare { compatible = "qcom,memshare"; @@ -42,3 +48,18 @@ qcom,memshare { label = "modem"; }; }; + +Example 2: + +qcom,memshare { + compatible = "qcom,memshare"; + + qcom,client_3 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x500000>; + qcom,client-id = <1>; + qcom,allocate-on-request; + qcom,guard-band; + label = "modem"; + }; +}; \ No newline at end of file diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index f0e27cad2427ca9e4de8fb39332178dff1a42680..562827c3402905c6e6c363b81eba0a9d1478bd6c 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -62,6 +62,9 @@ SoCs: - SDMMAGPIE compatible = "qcom,sdmmagpie" +- TRINKET + compatible = "qcom,trinket" + Generic board variants: - CDP device: @@ -178,3 +181,7 @@ compatible = "qcom,sdxprairie-cdp" compatible = "qcom,sdmmagpie-rumi" compatible = "qcom,sdmmagpie-idp" compatible = "qcom,sdmmagpie-qrd" +compatible = "qcom,sdmmagpiep-idp" +compatible = "qcom,sdmmagpiep" +compatible = "qcom,sdmmagpiep-qrd" +compatible = "qcom,trinket-rumi" diff --git a/Documentation/devicetree/bindings/arm/msm/msm_aop_ddr_msgs.txt b/Documentation/devicetree/bindings/arm/msm/msm_aop_ddr_msgs.txt new file mode 100644 index 0000000000000000000000000000000000000000..de4160ae1a47914906be06b78eea04eed9a4278b --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/msm_aop_ddr_msgs.txt @@ -0,0 +1,18 @@ +AOP (Always-On-Processor) DDR Related Messaging + +The AOP DDR messaging driver is used to send messages to the AOP, +using the mailbox interface, to lower the DDR frequency during reboot. 
+ +Required properties: + +- compatible : "qcom,aop-ddr-msgs" +- mboxes : QMP mailbox phandle and channel identifier + +Optional properties: +- mbox-name: name of the mailbox + +Example: + qcom,aop-ddr-msgs { + compatible = "qcom,aop-ddr-msgs"; + mboxes = <&qmp_aop 0>; + }; diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt index 0c75cf64e4d21030b8d01f7e85ed855ab71371e4..23e4bd5f42c192dc45f9ac938eade29a78fbb2e6 100644 --- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt +++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt @@ -25,6 +25,7 @@ Required properties: Optional properties: - qcom,sleep-clk-en: GPIO for sleep clock used for low power modes by 11ad card - qcom,wigig-en: Enable GPIO connected to 11ad card +- qcom,wigig-dc: Enable DC to DC GPIO connected to 11ad card - qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply - vdd-supply: phandle to 11ad VDD regulator node - vddio-supply: phandle to 11ad VDDIO regulator node @@ -45,6 +46,7 @@ Example: qcom,smmu-mapping = <0x20000000 0xe0000000>; qcom,pcie-parent = <&pcie1>; qcom,wigig-en = <&tlmm 94 0>; + qcom,wigig-dc = <&tlmm 81 0>; qcom,msm-bus,name = "wil6210"; qcom,msm-bus,num-cases = <2>; qcom,msm-bus,num-paths = <1>; diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt index d00fd94a4812aa761f23e67dce074aaecf41437d..f0ac66bae4f60ec0879c3aff9de96229207f5c89 100644 --- a/Documentation/devicetree/bindings/batterydata/batterydata.txt +++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt @@ -82,10 +82,15 @@ Profile data node optional properties: If yes, the low and high thresholds defined in "qcom,step-chg-ranges" tuples should be assigned as SoC values in percentage. +- qcom,ocv-based-step-chg: A bool property to indicate if the battery will + perform OCV (Open Circuit Voltage) based step charging. 
+ If yes, the low and high thresholds defined in + "qcom,step-chg-ranges" tuples should be assigned as + OCV values in microvolts. - qcom,step-chg-ranges: Array of tuples in which a tuple describes a range data of step charging setting. A range contains following 3 integer elements: - [0]: the low threshold of battery votlage in uV + [0]: the low threshold of battery voltage in uV or SoC (State of Charge) in percentage when SoC based step charge is used; [1]: the high threshold of battery voltage in uV diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt index fd4f1f10f3ab3aeb429aa01e85329d60b7b7ac57..3f5087692ccfa928abb590b6dd692ed070102049 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt @@ -25,6 +25,7 @@ Required properties : "qcom,gcc-mdss-qcs405" "qcom,gcc-sm6150" "qcom,gcc-sdmmagpie" + "qcom,gcc-sdxprairie" - reg : shall contain base register location and length - #clock-cells : shall contain 1 diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt index db9fc37009122234ad44ad43287651a5d5574148..ece6b9e5b769e413ceae8a7ceeb2eb9bc34ece96 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt @@ -5,7 +5,8 @@ Required properties: "qcom,rpmh-clk-sm6150", "qcom,rpmh-clk-sm8150", "qcom,rpmh-clk-sdmshrike", - "qcom,rpmh-clk-sdmmagpie". + "qcom,rpmh-clk-sdmmagpie" + "qcom,rpmh-clk-sdxprairie". - #clock-cells: Must contain 1. - mboxes: List of RPMh mailbox phandle and channel identifier tuples. - mbox-names: List of names to identify the RPMh mailboxes used. 
diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt index 06a60e2bfcdf7e467ff9d6bff3c7ae77d0bd4e23..751e498597e1fc1b9897fc9abe4b728a4bfafce8 100644 --- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt +++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt @@ -35,6 +35,10 @@ Optional properties: - vdd-wlan-xtal-aon-supply: phandle to the LDO-4 regulator. This is needed on platforms where XTAL regulator depends on always on regulator in VDDmin. + - vdd-wlan-ctrl1-supply: phandle to the DBU1 - 1.8V for QCA6595 or 3.3V for + QCA6174 on auto platform. + - vdd-wlan-ctrl2-supply: phandle to the DBU4 - 2.2V for QCA6595 or 3.85V for + QCA6696 on auto platform. - vdd-wlan-core-supply: phandle to the 1.3V CORE regulator for QCA6174 - vdd-wlan-sp2t-supply: phandle to the 2.7V SP2T regulator for QCA6174 - qcom,smmu-s1-enable: Boolean property to decide whether to enable SMMU diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt index 3be4d8c7370744cd5e790261aabddce5bbbc27ff..e9d41ebcb8232b2d07681a970027c5c97abc277a 100644 --- a/Documentation/devicetree/bindings/cnss/icnss.txt +++ b/Documentation/devicetree/bindings/cnss/icnss.txt @@ -29,10 +29,13 @@ Optional properties: - qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs. - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass - qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory - - qcom,gpio-force-fatal-error: SMP2P bit triggered by WLAN FW to force error fatal. - - qcom,gpio-early-crash-ind: SMP2P bit triggered by WLAN FW to indicate FW is in assert. - qcom,hyp_disabled: Boolean context flag to disable hyperviser +WLAN SMP2P sub nodes + + - qcom,smp2p_map_wlan_1_in - represents the in smp2p to + wlan driver from modem. 
+ Example: qcom,icnss@0a000000 { @@ -62,7 +65,11 @@ Example: qcom,smmu-s1-bypass; vdd-0.8-cx-mx-supply = <&pm8998_l5>; qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>; - qcom,gpio-forced-fatal-error = <&smp2pgpio_wlan_1_in 0 0>; - qcom,gpio-early-crash-ind = <&smp2pgpio_wlan_1_in 1 0>; qcom,hyp_disabled; + qcom,smp2p_map_wlan_1_in { + interrupts-extended = <&smp2p_wlan_1_in 0 0>, + <&smp2p_wlan_1_in 1 0>; + interrupt-names = "qcom,smp2p-force-fatal-error", + "qcom,smp2p-early-crash-ind"; + }; }; diff --git a/Documentation/devicetree/bindings/edac/gic600-edac.txt b/Documentation/devicetree/bindings/edac/gic600-edac.txt new file mode 100644 index 0000000000000000000000000000000000000000..53490a5365930743a1a82b4fbda98fe733176983 --- /dev/null +++ b/Documentation/devicetree/bindings/edac/gic600-edac.txt @@ -0,0 +1,40 @@ +* GIC EDAC node: + +GIC EDAC node is defined to describe on-chip error or fault detection and correction +for GIC RAM. GICT(GIC trace and debug) register map describes the syndrome information +and GICT support is available from GIC-600 onwards. + +GIC EDAC reports all Single Bit Error and Double Bit Error found in GIC RAM. +Current GIC ECC implementation is interrupt based. GIC scrub feature helps +to avoid potential error accumulation at a particular error location by a write-back +of all valid RAM entries periodically. + +GICT identifies software programming errors, correctable and uncorrectable +errors of Shared Peripheral Interrupt(SPI) RAM, Software Generated Interrupt(SGI) RAM, +Private Peripheral Interrupt(PPI) RAM, Locality-Specific Peripheral Interrupt(LPI) RAM +and Interrupt Translation Service (ITS) RAM. +For every error record the following information is available - + ERRXSTATUS - Error Record Status Register + ERRXMISC0 - Error Record Miscellaneous Register 0 + ERRXMISC1 - Error Record Miscellaneous Register 1 + ERRXADDR - Error Record Address Register + +The following section describes the DT node binding for gic-erp. 
+ +Required properties: +- compatible : "arm,gic-600-erp". +- reg-names : GIC Trace, GIC Distributor, GIC Redistributor bases +- interrupt-config : SPI interrupt numbers of Fault and Error interrupts + to configure GICT_ERRORQCR0, GICT_ERRORQCR1 registers respectively. +- interrupts : Interrupts for Fault and Error IRQs + +gict: gict@17a20000 { + compatible = "arm,gic-600-erp"; + reg = <0x17a20000 0x10000>; /* GICT */ + reg-names = "gict-base"; + interrupt-config = <46, 17>; + interrupt-names = "gict-fault", "gict-err"; + interrupts = , + ; +}; + diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt index d1e79295bc3f3b93607cb9c7501e12eb63b30ad2..18d5518e338e7b9d009c344a5e73d2e42653693d 100644 --- a/Documentation/devicetree/bindings/fb/mdss-pll.txt +++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt @@ -19,7 +19,7 @@ Required properties: "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm", "qcom,mdss_dsi_pll_7nm", "qcom,mdss_dp_pll_7nm", "qcom,mdss_dsi_pll_28lpm", "qcom,mdss_dsi_pll_14nm", - "qcom,mdss_dp_pll_14nm" + "qcom,mdss_dp_pll_14nm", "qcom,mdss_hdmi_pll_28lpm" - cell-index: Specifies the controller used - reg: offset and length of the register set for the device. - reg-names : names to refer to register sets related to this device diff --git a/Documentation/devicetree/bindings/gpu/adreno-gmu.txt b/Documentation/devicetree/bindings/gpu/adreno-gmu.txt index 251ffc07bcf398b9f6c003d40778e29dc6c2a01c..628675ef806ab85564dd98bd95eea978f81ada25 100644 --- a/Documentation/devicetree/bindings/gpu/adreno-gmu.txt +++ b/Documentation/devicetree/bindings/gpu/adreno-gmu.txt @@ -3,6 +3,7 @@ Qualcomm Technologies, Inc. GPU Graphics Management Unit (GMU) Required properties: - compatible : - "qcom,gpu-gmu" + - "qcom,gpu-rgmu" - reg: Specifies the PDC register base address and size. - reg-names: Resource names used for the physical address and length of PDC registers. 
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt index cf84dba7fece7d55aafcd6a2909e4a3cc50f3886..94aa89fc6c0253280b75958dcfaedadc876c0d88 100644 --- a/Documentation/devicetree/bindings/gpu/adreno.txt +++ b/Documentation/devicetree/bindings/gpu/adreno.txt @@ -85,6 +85,8 @@ DCVS Core info Optional Properties: - qcom,initial-powerlevel: This value indicates which qcom,gpu-pwrlevel should be used at start time and when coming back out of resume +- qcom,throttle-pwrlevel: This value indicates which qcom,gpu-pwrlevel LM throttling + may start to occur - qcom,bus-control: Boolean. Enables an independent bus vote from the gpu frequency - qcom,bus-width: Bus width in number of bytes. This enables dynamic AB bus voting based on bus width and actual bus transactions. diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt index cef1fe76443a038266fa0ed701ca7f2e442e2cb5..b1e6a504733cf615a2622b3a930abd6f23c847eb 100644 --- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt +++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt @@ -9,6 +9,7 @@ Required properties: - "qcom,sm6150-vidc" : Invokes driver specific data for SM6150. - "qcom,sm8150-vidc" : Invokes driver specific data for SM8150. - "qcom,sm6150-vidc" : Invokes driver specific data for SM6150. + - "qcom,sdmmagpie-vidc" : Invokes driver specific data for sdmmagpie. - "qcom,sdm845-vidc" : Invokes driver specific data for SDM845. - "qcom,sdm670-vidc" : Invokes driver specific data for SDM670. 
diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..30174680a91dfbea2ebac23b08cfdda860ffad61 --- /dev/null +++ b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt @@ -0,0 +1,46 @@ +MSM MHI DEV + +MSM MHI DEV enables communication with the host over a PCIe link using the +Modem Host Interface protocol. The driver interfaces with the IPA for +enabling the HW acceleration channel path and provides interface for +software channels to communicate between Host and device. + +Required properties: + - compatible: should be "qcom,msm-mhi-dev" for MHI device driver. + - reg: MHI MMIO physical register space. + - reg-names: resource names used for the MHI MMIO physical address region, + IPA uC command and event ring doorbell mail box address. + Should be "mhi_mmio_base" for MHI MMIO physical address, + "ipa_uc_mbox_crdb" for IPA uC Command Ring doorbell, + "ipa_uc_mbox_erdb" for IPA uC Event Ring doorbell passed to + the IPA driver. + - qcom,mhi-ifc-id: ID of HW interface via which MHI on device side + communicates with host side. + - qcom,mhi-ep-msi: End point MSI number. + - qcom,mhi-version: MHI specification version supported by the device. + +Optional property: + - qcom,use-ipa-software-channel: If property is present use IPA hardware + accelerated path for MHI software channel data transfers + between host and device. + - qcom,mhi-config-iatu: If property is present map the control and data region + between host and device using iatu. + - qcom,mhi-interrupt: If property is present register for mhi interrupt. + - qcom,mhi-local-pa-base: The physical base address on the device used by the + MHI device driver to map the control and data region with the + MHI driver on the host. This property is required if iatu + property qcom,mhi-config-iatu is present. 
+ +Example: + + mhi: qcom,msm-mhi-dev { + compatible = "qcom,msm-mhi-dev"; + reg = <0xfc527000 0x1000>, + <0xfd4fa000 0x1>, + <0xfd4fa080 0x1>; + reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb", + "ipa_uc_mbox_erdb"; + qcom,mhi-ifc-id = <0x030017cb>; + qcom,mhi-ep-msi = <1>; + qcom,mhi-version = <0x1000000>; + }; diff --git a/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt new file mode 100644 index 0000000000000000000000000000000000000000..233c7cc73b642ebcf58889168bbfc516a8bc71f6 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/fpc,fpc1028.txt @@ -0,0 +1,85 @@ +Fingerprint Cards AB. Fpc1028 driver + +The fpc1028 fingerprint sensor is connected to the host processor via SPI. +The sensor will generate interrupts when the user touches the sensor. +The host controller is expected to read data over SPI and pass the data to +the rest of the system. + +This binding document describes the properties for this module. + +Properties: + +- compatible + Usage: required + Value type: + Definition: It must be "fpc,fpc1020" + +- interrupts + Usage: required + Value type: + Definition: Peripheral interrupt specifier. + +- interrupt-parent + Usage: required + Value type: + Definition: phandle of the interrupt controller which services the + summary interrupt. + +- fpc,gpio_rst + Usage: required + Value type: + Definition: GPIO which connects to the reset pin of fpc1028 + +- fpc,gpio_irq + Usage: required + Value type: + Definition: Specifies the GPIO which connects to the irq pin of fpc1028. + +- vcc_spi-supply + Usage: required + Value type: + Definition: The phandle of the regulator which supplies fpc1028 spi bus core. + +- vcc_io-supply + Usage: required + Value type: + Definition: The phandle of the regulator which supplies fpc1028 io pins. + +- vcc_ana-supply + Usage: required + Value type: + Definition: The phandle of the regulator which supplies fpc1028 analog circuit. 
+ +- pinctrl-names: + Usage: required + Value type: + Definition: Pinctrl state names for each pin group configuration. + eg:"fpc1020_reset_reset", "fpc1020_reset_active", "fpc1020_irq_active". + refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt" + +- pinctrl-n: + Usage: required + Value type: + Definition: pinctrl state for each pin group + refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt" + + +Example: + + fpc1020 { + compatible = "fpc,fpc1020"; + interrupt-parent = <&tlmm>; + interrupts = <48 0>; + fpc,gpio_rst = <&tlmm 124 0x0>; + fpc,gpio_irq = <&tlmm 48 0>; + vcc_spi-supply = <&pm8953_l5>; + vdd_io-supply = <&pm8953_l5>; + vdd_ana-supply = <&pm8953_l5>; + fpc,enable-on-boot; + pinctrl-names = "fpc1020_reset_reset", + "fpc1020_reset_active", + "fpc1020_irq_active"; + pinctrl-0 = <&msm_gpio_124>; + pinctrl-1 = <&msm_gpio_124_output_high>; + pinctrl-2 = <&msm_gpio_48>; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,trinket-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,trinket-pinctrl.txt new file mode 100644 index 0000000000000000000000000000000000000000..893fbd9880d01bffed1304c032914fc5da880adc --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,trinket-pinctrl.txt @@ -0,0 +1,187 @@ +Qualcomm Technologies, Inc. TRINKET TLMM block + +This binding describes the Top Level Mode Multiplexer block found in the +TRINKET platform. + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,trinket-pinctrl" + +- reg: + Usage: required + Value type: + Definition: the base address and size of the TLMM register space. + +- interrupts: + Usage: required + Value type: + Definition: should specify the TLMM summary IRQ. + +- interrupt-controller: + Usage: required + Value type: + Definition: identifies this node as an interrupt controller + +- #interrupt-cells: + Usage: required + Value type: + Definition: must be 2. 
Specifying the pin number and flags, as defined + in + +- gpio-controller: + Usage: required + Value type: + Definition: identifies this node as a gpio controller + +- #gpio-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for +a general description of GPIO and interrupt bindings. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". + +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin, a group, or a list of pins or groups. This configuration can include the +mux function to select on those pin(s)/group(s), and various pin configuration +parameters, such as pull-up, drive strength, etc. + + +PIN CONFIGURATION NODES: + + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. 
+ + Valid pins are: + gpio0-gpio132 + Supports mux, bias and drive-strength + + sdc1_clk, sdc1_cmd, sdc1_data sdc2_clk, sdc2_cmd, + sdc2_data sdc1_rclk + Supports bias and drive-strength + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Functions are only valid for gpio pins. + Valid values are: + + blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens, + bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8, + qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b, + dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10, + blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12, + mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11, + atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char, + cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b, + pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c, + qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4, + qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5, + atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6, + atest_usb20, atest_char0, dac_calib10, qdss_stm10, + qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6, + blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11, + qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1, + qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11, + dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6, + qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14, + dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem, + dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto, + dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0, + dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25, + sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2, + qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3, + uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, 
blsp_i2c9, + blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7, + qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11, + blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0, + cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4, + blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4, + qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus, + isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s, + qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b, + sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b, + gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12, + qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29, + tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27, + qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk, + sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b, + sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b, + ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b, + blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt, + pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11, + qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx, + qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3, + gpio + +- bias-disable: + Usage: optional + Value type: + Definition: The specified pins should be configured as no pull. + +- bias-pull-down: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull down. + +- bias-pull-up: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull up. + +- output-high: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + high. + Not valid for sdc pins. + +- output-low: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + low. 
+ Not valid for sdc pins. + +- drive-strength: + Usage: optional + Value type: + Definition: Selects the drive strength for the specified pins, in mA. + Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16 + +Example: + + tlmm: pinctrl@400000 { + compatible = "qcom,trinket-pinctrl"; + reg = <0x400000 0xc00000>; + interrupts = <0 227 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt index 83f964d9d565c79b59b3f5e4e303c149e7755477..1e49e964244316bd7196acbc90c13ae3ab066bcf 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt @@ -308,6 +308,20 @@ First Level Node - QGAUGE device Definition: Boolean property to support external-rsense based configuration. +- qcom,shutdown-temp-diff + Usage: optional + Value type: + Definition: The allowed battery temperature in deci-degree difference + between shutdown and power-on to continue with the shutdown + SOC. If not specified the default value is 6 degrees C (60). + +- qcom,shutdown-soc-threshold + Usage: optional + Value type: + Definition: The SOC difference allowed between PON and SHUTDOWN SOC + for the shutdown SOC to be used. If the difference is + beyond this value the PON SOC is used. 
+ ========================================================== Second Level Nodes - Peripherals managed by QGAUGE driver ========================================================== diff --git a/Documentation/devicetree/bindings/regulator/fan53555.txt b/Documentation/devicetree/bindings/regulator/fan53555.txt index 90ca0b70d55526b2879221a8acf55ccf979d0c31..5be72e114a595d601773275750840528c48ddd82 100644 --- a/Documentation/devicetree/bindings/regulator/fan53555.txt +++ b/Documentation/devicetree/bindings/regulator/fan53555.txt @@ -1,8 +1,8 @@ Binding for Fairchild FAN53555 regulators Required properties: - - compatible: one of "fcs,fan53555", "silergy,syr827", "silergy,syr828" or - "halo,hl7509" + - compatible: one of "fcs,fan53555", "silergy,syr827", "silergy,syr828", + "halo,hl7509" or "halo,hl7503" - reg: I2C address Optional properties: diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt index e80b3b39c94de1de01e5adb6c4dcc4654a9af526..6fae4187b0522c0c72f92a341bd9740096007550 100644 --- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt @@ -31,6 +31,14 @@ First Level Node - LCDB module Value type: Definition: Phandle to the PMIC's revid node +- qcom,voltage-step-ramp + Usage: optional + Value type: + Definition: Required only if the voltage needs to be set in the + steps of 500 mV starting from the 4500 mV. This needs + to be enabled only on platforms where voltage needs to + be ramped up with multiple steps. + Touch-to-wake (TTW) properties: TTW supports 2 modes of operation - HW and SW. 
In the HW mode the enable/disable diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt index adcede43cda65bca837fb4bcbfb4e73357a90276..191c283b51a145427427dc04ae1f26efcd5fcaa6 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt @@ -32,6 +32,11 @@ a GLINK SPSS edge and a SPI based edge. Definition: reference to the "rpm_hlos" mailbox in APCS, as described in mailbox/mailbox.txt +- cpu-affinity: + Usage: optional + Value type: + Definition: cores to pin the irq to + = GLINK SPSS The remote proc on a GLINK SPSS edge expects the descriptors and fifos to be allocated by this processor. The following bindings are required to inform the diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index 97492fb9fb3ebe804d98b03d921672cbe3467c5a..4e420fd46e88bb76cc0f411fad5301d426c14855 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -1517,6 +1517,7 @@ Optional properties: - qcom,auxpcm-audio-intf: Property to specify if Aux PCM interface is used for the target - qcom,tavil_codec : Property to specify if Tavil codec is used for this target - qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios. 
+- qcom,msm_audio_ssr_devs: List the snd event framework clients Example: sm6150_snd: sound { @@ -1620,6 +1621,7 @@ Example: qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", "SpkrLeft", "SpkrRight"; qcom,codec-aux-devs = <&wcd937x_codec>; + qcom,msm_audio_ssr_devs = <&audio_apr>, <&q6core>; }; }; diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt index cd2d2a3c4d9f4fb6c275201dc7cb6a67e80106c1..5e83e17fc5ce56ab3394cf17890266107cd07a6d 100644 --- a/Documentation/devicetree/bindings/sound/wcd_codec.txt +++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt @@ -453,6 +453,9 @@ Required properties: - clock-names : clock names defined for WSA macro - clocks : clock handles defined for WSA macro - qcom,wsa-swr-gpios: phandle for SWR data and clock GPIOs of WSA macro + - qcom,wsa-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order + required to be configured to receive interrupts + in BCL block of WSA macro Example: @@ -464,6 +467,7 @@ Example: clocks = <&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>; qcom,wsa-swr-gpios = &wsa_swr_gpios; + qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>; }; }; @@ -497,6 +501,9 @@ Required properties: - clocks : clock handles defined for RX macro - qcom,rx-swr-gpios: phandle for SWR data and clock GPIOs of RX macro - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select + - qcom,rx-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order + required to be configured to receive interrupts + in BCL block of WSA macro Example: @@ -509,6 +516,7 @@ Example: <&clock_audio_rx_2 0>; qcom,rx-swr-gpios = <&rx_swr_gpios>; qcom,rx_mclk_mode_muxsel = <0x62C25020>; + qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>; swr_1: rx_swr_master { compatible = "qcom,swr-mstr"; wcd937x_rx_slave: wcd937x-rx-slave { diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt 
b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt index 8b6d5a25125f69dd22d0ee8d484d5ebdb337fc6a..026a06597048654bdc6a8af38cfcb9e2599864cf 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt +++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt @@ -31,7 +31,22 @@ Optional properties: - qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend. SPI slave nodes must be children of the SPI master node and can contain -properties described in Documentation/devicetree/bindings/spi/spi-bus.txt +the following properties. + +Required properties: +- compatible: Should contain: + "qcom,spi-msm-codec-slave" for external codec control + +- reg: Chip select address of device. + +- spi-max-frequency: Maximum SPI clocking speed of device in Hz. + +Optional properties: +- spi-cpha: Empty property indicating device requires + shifted clock phase (CPHA) mode. + +Other optional properties described in +Documentation/devicetree/bindings/spi/spi-bus.txt Example: diff --git a/Documentation/devicetree/bindings/thermal/qcom,cx-ipeak-cdev.txt b/Documentation/devicetree/bindings/thermal/qcom,cx-ipeak-cdev.txt index 0792223f83fb495a2bbb10682fbb1a77add1b942..562cc6e5f5d36ad91fe9ecbdd1e7969731513ab6 100644 --- a/Documentation/devicetree/bindings/thermal/qcom,cx-ipeak-cdev.txt +++ b/Documentation/devicetree/bindings/thermal/qcom,cx-ipeak-cdev.txt @@ -18,6 +18,24 @@ Required Parameters: register address of the CX IPEAK LM hardware and 'b' is the size of the peripheral address space. +- qcom,thermal-client-offset: + Usage: Optional + Value type: + Definition: This property is required for CX IP LM v1.1 and above + hardware. Must contain offset from CX IPEAK LM reg + base for thermal client voting. If this property is not defined, + then CX IPEAK cooling device will use legacy CXIP LM hardware + offset registers. 
+ +- qcom,bypass-client-list: + Usage: Optional + Value type: + Definition: This property is required for CX IP LM v1.1 and above + hardware. Must contain array of offsets from CX IPEAK LM reg + base for clients those are not participating voting to CXIP LM + hardware. This property makes sense only when thermal-client + is defined. + - #cooling-cells: Usage: required Value type: @@ -29,5 +47,7 @@ Example: cxip_cdev: cxip-cdev@1fed000 { compatible = "qcom,cxip-lm-cooling-device"; reg = <0x1fed000 0x24>; + qcom,thermal-client-offset = <0x8000>; + qcom,bypass-client-list = <0x2004 0x3004>; #cooling-cells = <2>; }; diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt index 33beda5a55ba11945540b99d7031896fcda858b7..b0654130445f1447f1cab3f405f005e5f5233f9c 100644 --- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt @@ -79,6 +79,11 @@ Optional properties : are supported. If omitted, assume HW supports "1.5". - qcom,reset-ep-after-lpm-resume: If present, dbm requires ep reset after going to lpm +- qcom,host-poweroff-in-pm-suspend: If present, allow PM suspend to happen + irrespective of runtimePM state of host and power collapse the core. + This also leads to reset-resume of connected devices on PM resume. +- qcom,default-mode-host: If present, start host mode on probe for an OTG + capable DWC3 which does not have extcon handle. Sub nodes: - Sub node for "DWC3- USB3 controller". 
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index 7a69b8b47b97cec6aa0cc71bb5cee2a1bb55073e..6e058fdcd80781ab4db1b80305760637c8ef8700 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -29,6 +29,9 @@ Optional properties: - clocks: reference to a clock - usb3-lpm-capable: determines if platform is USB3 LPM capable - quirk-broken-port-ped: set if the controller has broken port disable mechanism + - host-poweroff-in-pm-suspend: If set, allow PM suspend to happen irrespective + of runtimePM state of host and power collapse the core. This also leads to + reset-resume of connected devices on PM resume. Example: usb@f0931000 { diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 0753f394367ac50d1aea32ca7ba172bd9f2bc1aa..dd788a9c410ed011919502ccfe37f200ebe53980 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -120,6 +120,7 @@ faraday Faraday Technology Corporation fcs Fairchild Semiconductor firefly Firefly focaltech FocalTech Systems Co.,Ltd +fpc Fingerprint Cards AB. friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd fsl Freescale Semiconductor fujitsu Fujitsu Ltd. 
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 560beaef5a7c83b99b69292943a1eb2732efccee..73fcdcd52b879132872b6f8ad062a672dd68cf0f 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -33,7 +33,7 @@ GNU C 3.2 gcc --version GNU make 3.81 make --version binutils 2.20 ld -v util-linux 2.10o fdformat --version -module-init-tools 0.9.10 depmod -V +kmod 13 depmod -V e2fsprogs 1.41.4 e2fsck -V jfsutils 1.1.3 fsck.jfs -V reiserfsprogs 3.6.3 reiserfsck -V @@ -141,12 +141,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and reproduce the Oops with that option, then you can still decode that Oops with ksymoops. -Module-Init-Tools ------------------ - -A new module loader is now in the kernel that requires ``module-init-tools`` -to use. It is backward compatible with the 2.4.x series kernels. - Mkinitrd -------- @@ -346,16 +340,17 @@ Util-linux - +Kmod +---- + +- +- + Ksymoops -------- - -Module-Init-Tools ------------------ - -- - Mkinitrd -------- diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 88ad78c6f605fb5967888f30b38c4021e0efc8ce..5d12166bd66b009f12b23b352baed949fd832d0a 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the flag KVM_VM_MIPS_VZ. -4.3 KVM_GET_MSR_INDEX_LIST +4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST -Capability: basic +Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST Architectures: x86 -Type: system +Type: system ioctl Parameters: struct kvm_msr_list (in/out) Returns: 0 on success; -1 on error Errors: + EFAULT: the msr index list cannot be read from or written to E2BIG: the msr index list is to be to fit in the array specified by the user. 
@@ -139,16 +140,23 @@ struct kvm_msr_list { __u32 indices[0]; }; -This ioctl returns the guest msrs that are supported. The list varies -by kvm version and host processor, but does not change otherwise. The -user fills in the size of the indices array in nmsrs, and in return -kvm adjusts nmsrs to reflect the actual number of msrs and fills in -the indices array with their numbers. +The user fills in the size of the indices array in nmsrs, and in return +kvm adjusts nmsrs to reflect the actual number of msrs and fills in the +indices array with their numbers. + +KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list +varies by kvm version and host processor, but does not change otherwise. Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are not returned in the MSR list, as different vcpus can have a different number of banks, as set via the KVM_X86_SETUP_MCE ioctl. +KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed +to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities +and processor features that are exposed via MSRs (e.g., VMX capabilities). +This list also varies by kvm version and host processor, but does not change +otherwise. + 4.4 KVM_CHECK_EXTENSION @@ -475,14 +483,22 @@ Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead. 4.18 KVM_GET_MSRS -Capability: basic +Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system) Architectures: x86 -Type: vcpu ioctl +Type: system ioctl, vcpu ioctl Parameters: struct kvm_msrs (in/out) -Returns: 0 on success, -1 on error +Returns: number of msrs successfully returned; + -1 on error + +When used as a system ioctl: +Reads the values of MSR-based features that are available for the VM. This +is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values. +The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST +in a system ioctl. 
+When used as a vcpu ioctl: Reads model-specific registers from the vcpu. Supported msr indices can -be obtained using KVM_GET_MSR_INDEX_LIST. +be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl. struct kvm_msrs { __u32 nmsrs; /* number of msrs in entries */ diff --git a/Makefile b/Makefile index af8f68cb819850fba069d68a85c76716cf020296..0226b62da213f100da0bb832392147661b1f4ea5 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 62 +SUBLEVEL = 66 EXTRAVERSION = NAME = Petit Gorille diff --git a/arch/Kconfig b/arch/Kconfig index 6ba7bbb16c68ec99e2cc24cdc463f30d5257ef0a..226b692dca5a9f97c0aa98a0d62c42aee81fcc1c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -13,6 +13,9 @@ config KEXEC_CORE config HAVE_IMA_KEXEC bool +config HOTPLUG_SMT + bool + config OPROFILE tristate "OProfile system profiling" depends on PROFILING diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 6c7eb54be9e2a4912d27cd4dcc2803a338390323..d64438bfa68b305c7dae6a4a5b42fe76e3f96caa 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -1305,7 +1305,7 @@ 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; bus-range = <0x00 0xff>; num-lanes = <1>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>, <&clks IMX6SX_CLK_PCIE_AXI>, <&clks IMX6SX_CLK_LVDS1_OUT>, diff --git a/arch/arm/configs/vendor/qcs405_defconfig b/arch/arm/configs/vendor/qcs405_defconfig index 3305c1682ce94f40eab6b1f574e03a0c0f0c8b98..a1d99420a0b894c7b96fd571d551aa9b5c3044d9 100644 --- a/arch/arm/configs/vendor/qcs405_defconfig +++ b/arch/arm/configs/vendor/qcs405_defconfig @@ -164,6 +164,11 @@ CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y CONFIG_BRIDGE_NF_EBTABLES=y CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_SNAT=y CONFIG_L2TP=y CONFIG_L2TP_DEBUGFS=y 
CONFIG_L2TP_V3=y @@ -231,7 +236,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_KS8851=y +CONFIG_AT803X_PHY=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -445,6 +450,7 @@ CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y # CONFIG_MSM_JTAGV8 is not set CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_ARM_MEMLAT_MON=y CONFIG_DEVFREQ_GOV_MEMLAT=y diff --git a/arch/arm/configs/vendor/sdxprairie-perf_defconfig b/arch/arm/configs/vendor/sdxprairie-perf_defconfig index 381f2fc1e2f228abe5af6e1631884583797a48dd..0c264fcbdd6e7cff57f984f9af5a4b311cd24f73 100644 --- a/arch/arm/configs/vendor/sdxprairie-perf_defconfig +++ b/arch/arm/configs/vendor/sdxprairie-perf_defconfig @@ -211,6 +211,7 @@ CONFIG_POWER_RESET=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y CONFIG_REGULATOR=y +CONFIG_REGULATOR_STUB=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y @@ -272,6 +273,7 @@ CONFIG_ECM_IPA=y CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_MSM_CLK_RPMH=y +CONFIG_GCC_SDXPRAIRIE=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_IOMMU_IO_PGTABLE_FAST=y @@ -281,6 +283,8 @@ CONFIG_IOMMU_DEBUG_TRACKING=y CONFIG_IOMMU_TESTS=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y CONFIG_QTI_RPMH_API=y CONFIG_PWM=y diff --git a/arch/arm/configs/vendor/sdxprairie_defconfig b/arch/arm/configs/vendor/sdxprairie_defconfig index 9f419abd5388705c52fbdaa9b997b288073065e5..a43bdd43db0607059056e0d11954628e128cd2d9 100644 --- a/arch/arm/configs/vendor/sdxprairie_defconfig +++ b/arch/arm/configs/vendor/sdxprairie_defconfig @@ -201,6 +201,7 @@ CONFIG_POWER_RESET=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y CONFIG_REGULATOR=y +CONFIG_REGULATOR_STUB=y CONFIG_FB=y CONFIG_SOUND=y CONFIG_SND=y @@ -262,6 +263,7 @@ CONFIG_ECM_IPA=y CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_MSM_CLK_RPMH=y +CONFIG_GCC_SDXPRAIRIE=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y 
CONFIG_IOMMU_IO_PGTABLE_FAST=y @@ -272,6 +274,8 @@ CONFIG_IOMMU_TESTS=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y CONFIG_QTI_RPMH_API=y CONFIG_PWM=y diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7d5cfe48b1cb677832e080a7de1e2cdae7250be4..4224636feb18b74aded19ac98a810e8eaf118f5a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2464,7 +2464,7 @@ static int __arm_iommu_attach_device(struct device *dev, { int err; - err = iommu_attach_device(mapping->domain, dev); + err = iommu_attach_group(mapping->domain, dev->iommu_group); if (err) return err; @@ -2672,6 +2672,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, #endif dev->archdata.dma_ops_setup = true; } +EXPORT_SYMBOL(arch_setup_dma_ops); void arch_teardown_dma_ops(struct device *dev) { diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index bee202855f3ba91dd7b7f2917c9985c54d6281e9..fe09621bfb33f4bb1144fb30c02e0e1e0fffd02e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1311,6 +1311,17 @@ config BUILD_ARM64_UNCOMPRESSED_KERNEL concatenated dtb. endchoice +config KRYO_PMU_WORKAROUND + bool "Workaround for PMU IRQ burst" + default n + depends on ARM_PMU + help + Disable Performance Monitor overflow interrupts + when handling an monitor IRQ, to avoid simultaneous + overflow interrupts from multiple monitors. + + Enable this flag for effect SoCs. + config BUILD_ARM64_DT_OVERLAY bool "enable DT overlay compilation support" depends on OF diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 1fd95d45199f5d1ab8c60fe2b90e49b6fc9b546b..fa90d3bfda7ea02b6be753ec7a5250ca34f393ef 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -185,6 +185,14 @@ config ARCH_SDMMAGPIE This enables support for the SDMMAGPIE chipset. 
If you do not wish to build a kernel that runs on this chipset, say 'N' here. +config ARCH_TRINKET + bool "Enable Support for Qualcomm Technologies, Inc. TRINKET" + depends on ARCH_QCOM + select COMMON_CLK_QCOM + help + This enables support for the TRINKET chipset. If you do not + wish to build a kernel that runs on this chipset, say 'N' here. + config ARCH_REALTEK bool "Realtek Platforms" help diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 2bf868632f9cdc62854ec9c9a591790d105f2f31..030345c85044e64e5fd76cbb577ee52c4757d0bc 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -6,6 +6,7 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb +ifeq ($(CONFIG_ARM64),y) dtb-$(CONFIG_ARCH_QCS405) += qcs405-rumi.dtb \ qcs405-iot-sku1.dtb \ qcs405-iot-sku2.dtb \ @@ -16,8 +17,12 @@ dtb-$(CONFIG_ARCH_QCS405) += qcs405-rumi.dtb \ qcs405-iot-sku7.dtb \ qcs405-iot-sku8.dtb \ qcs405-iot-sku9.dtb \ - qcs403-iot-sku1.dtb \ - qcs403-iot-sku2.dtb + qcs405-iot-sku10.dtb +else +dtb-$(CONFIG_ARCH_QCS405) += qcs403-iot-sku1.dtb \ + qcs403-iot-sku2.dtb \ + qcs403-iot-sku3.dtb +endif ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) dtbo-$(CONFIG_ARCH_SM8150) += \ @@ -121,14 +126,35 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sdmmagpie-idp-overlay.dtbo \ sdmmagpie-rumi-overlay.dtbo \ sdmmagpie-qrd-overlay.dtbo \ + sdmmagpiep-idp-overlay.dtbo \ + sdmmagpiep-qrd-overlay.dtbo \ + sdmmagpie-external-codec-idp-overlay.dtbo \ + sdmmagpie-usbc-idp-overlay.dtbo sdmmagpie-idp-overlay.dtbo-base := sdmmagpie.dtb sdmmagpie-rumi-overlay.dtbo-base := sdmmagpie.dtb sdmmagpie-qrd-overlay.dtbo-base := sdmmagpie.dtb +sdmmagpiep-idp-overlay.dtbo-base := sdmmagpiep.dtb +sdmmagpiep-qrd-overlay.dtbo-base := sdmmagpiep.dtb +sdmmagpie-external-codec-idp-overlay.dtbo-base := sdmmagpie.dtb 
+sdmmagpie-usbc-idp-overlay.dtbo-base := sdmmagpie.dtb else dtb-$(CONFIG_ARCH_SDMMAGPIE) += sdmmagpie-rumi.dtb \ sdmmagpie-idp.dtb \ - sdmmagpie-qrd.dtb + sdmmagpie-qrd.dtb \ + sdmmagpiep-idp.dtb \ + sdmmagpiep-qrd.dtb \ + sdmmagpie-external-codec-idp.dtb \ + sdmmagpie-usbc-idp.dtb +endif + +ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) + dtbo-$(CONFIG_ARCH_TRINKET) += \ + trinket-rumi-overlay.dtbo + +trinket-rumi-overlay.dtbo-base := trinket.dtb +else +dtb-$(CONFIG_ARCH_TRINKET) += trinket-rumi.dtb endif dtb-$(CONFIG_ARCH_SDXPRAIRIE) += sdxprairie-rumi.dtb \ diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi index 83efb69d45836e7bda2a791a0e52404d313ded44..595eba0b1d4cae1321a666a0922634e1151d9430 100644 --- a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi +++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi @@ -11,8 +11,8 @@ * GNU General Public License for more details. 
*/ -qcom,alium_860_89032_0000_3600mah_averaged_masterslave_aug6th2018 { - /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Aug6th2018*/ +qcom,alium_860_89032_0000_3600mah_averaged_masterslave_sep24th2018 { + /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Sept24th2018*/ qcom,max-voltage-uv = <4350000>; qcom,fastchg-current-ma = <5400>; qcom,jeita-fcc-ranges = <0 100 2500000 @@ -21,53 +21,54 @@ qcom,alium_860_89032_0000_3600mah_averaged_masterslave_aug6th2018 { qcom,jeita-fv-ranges = <0 100 4250000 110 400 4350000 410 450 4250000>; - qcom,step-chg-ranges = <3600000 4200000 5400000 - 4201000 4300000 3600000 + qcom,step-chg-ranges = <3600000 3800000 5400000 + 3801000 4300000 3600000 4301000 4350000 2500000>; + qcom,ocv-based-step-chg; qcom,batt-id-kohm = <107>; qcom,battery-beta = <4250>; qcom,therm-room-temp = <100000>; qcom,fg-cc-cv-threshold-mv = <4340>; - qcom,battery-type = "alium_860_89032_0000_3600mah_aug6th2018"; + qcom,battery-type = "alium_860_89032_0000_3600mah_sept24th2018"; qcom,therm-coefficients = <0x2318 0xd0c 0xdaf7 0xc556 0x848d>; qcom,therm-center-offset = <0x70>; qcom,therm-pull-up = <100>; - qcom,rslow-normal-coeffs = <0xa4 0x01 0x24 0x13>; - qcom,rslow-low-coeffs = <0xa4 0x01 0x24 0x13>; - qcom,checksum = <0x99F7>; + qcom,rslow-normal-coeffs = <0xdf 0x02 0x77 0x1a>; + qcom,rslow-low-coeffs = <0x51 0x04 0xd0 0x13>; + qcom,checksum = <0x1538>; qcom,gui-version = "PM855GUI - 1.0.0.10"; qcom,fg-profile-data = [ - 09 00 BD EA - 40 CC E8 BC - DD C3 00 00 - B0 C5 72 92 - F3 87 C8 A2 - E6 9C E2 87 - 18 00 A4 01 - 24 13 47 FD - A9 F2 CE 07 - 32 00 0E E3 - 06 ED 2E EA - 83 FD 5B 14 - B8 1C 75 3A - 5C 42 CA 3A - 40 00 3A 00 - 40 00 48 00 - 3B 00 34 00 - 38 00 38 00 - 48 00 42 00 - 40 00 40 00 - 3B 00 30 00 - 32 00 39 00 - 32 00 42 00 - 5C 64 47 00 - 3B 08 40 10 - 40 00 3B 00 - 35 00 36 00 + 09 00 C7 EA + C4 DC 8E E2 + 3A DD 00 00 + 15 BC A5 8A + 02 80 D1 92 + AB 9D 47 80 + 10 00 DF 02 + 77 1A 85 EC + E1 FD CE 07 + 32 00 75 EB + AA ED F3 CD + 0C 
0A 7A E4 + ED C5 40 1B + D0 02 1F CA + FF 00 52 00 + 4D 00 4A 00 3C 00 35 00 - 44 20 53 40 - 41 50 3B 13 - 40 00 D8 00 + 38 00 39 00 + 48 00 43 00 + 3F 00 FF 00 + 38 00 40 00 + 46 00 50 00 + 45 00 5C 00 + 7E 64 60 00 + 50 08 50 10 + FF 00 6A 00 + 5F 00 63 00 + 6E 00 60 00 + 7D 20 96 40 + 75 50 6B 13 + 63 00 D8 00 14 22 7E 0D 21 02 AA 04 ED 1C D4 09 @@ -82,42 +83,42 @@ qcom,alium_860_89032_0000_3600mah_averaged_masterslave_aug6th2018 { 51 23 3F 45 28 53 69 14 93 20 8E EC - 18 D3 C8 CD + 18 CB C8 C5 DB 1C 7B C9 - 7C 05 E6 BA + 7C 05 E6 C2 B9 17 2C 93 87 85 A2 92 91 A8 09 80 - 94 F2 E8 0C - 75 FB 97 EA - 00 F8 0E 04 - 11 02 F4 F7 - 75 0A 72 15 - 49 11 15 10 - 95 0B CE 03 - DD 05 59 03 + 92 F2 1A 0D + F4 FC 5E EB + 00 F8 FB ED + 15 E2 F6 0F + 75 02 72 05 + 49 01 10 00 + FA E5 E2 03 + 8D 05 85 02 CE 07 32 00 - DF 02 CD 00 - A4 00 F5 02 - 47 02 91 02 - CC 03 26 02 - 3C 04 40 00 - 37 00 3B 00 - 3F 64 41 00 - 41 10 41 18 - 42 08 3E 00 - 3F 00 40 08 - 40 08 2B 00 - 37 20 3C 40 - 41 58 4A 10 - 44 00 4B 00 - 41 08 40 00 - 40 00 40 00 - 2B 08 38 08 - 37 00 3E 20 - 4A 40 54 58 - 46 10 40 00 - 40 08 40 10 + 23 03 46 02 + 9C 04 03 02 + 48 07 0A 00 + BA 03 97 02 + 65 05 50 00 + 3A 00 41 00 + 43 64 45 00 + 45 10 45 18 + 46 08 44 00 + 47 00 3A 08 + 4B 08 37 00 + 47 20 4E 40 + 54 58 60 10 + 57 00 5F 00 + 57 08 55 00 + 4B 00 50 00 + 3E 08 52 08 + 52 00 5C 20 + 6F 40 7D 58 + 67 10 63 00 + 69 08 4F 10 D8 00 8C 2A DB 04 28 02 AD 04 0B 1D diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi index ab69f4e034f666688d690a9b8755d8faaf1296c7..552c888d1510fe7b951f6c15c41a3516b261265f 100644 --- a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi +++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi @@ -11,8 +11,8 @@ * GNU General Public License for more details. 
*/ -qcom,mlp466076_3250mah_averaged_masterslave_mar27th2018 { - /* #mlp466076_3250mAh_averaged_MasterSlave_Mar27th2018 */ +qcom,mlp466076_3250mah_averaged_masterslave_sept24th2018 { + /* #mlp466076_3250mAh_averaged_MasterSlave_sept24th2018 */ qcom,max-voltage-uv = <4400000>; qcom,fastchg-current-ma = <6000>; /* COLD = 0 DegC, HOT = 55 DegC */ @@ -29,115 +29,117 @@ qcom,mlp466076_3250mah_averaged_masterslave_mar27th2018 { qcom,battery-beta = <4250>; qcom,therm-room-temp = <100000>; qcom,fg-cc-cv-threshold-mv = <4390>; - qcom,battery-type = "mlp466076_3250mah_mar27th2018"; + qcom,battery-type = "qrd855_mlp466076_3200mah_sept24th2018"; qcom,therm-coefficients = <0x2318 0xd0c 0xdaf7 0xc556 0x848d>; qcom,therm-center-offset = <0x70>; - qcom,checksum = <0xC5FF>; - qcom,gui-version = "PM8150GUI - 0.0.0.32"; + qcom,rslow-normal-coeffs = <0x43 0xfc 0xc9 0x12>; + qcom,rslow-low-coeffs = <0x07 0x15 0x8d 0xf5>; + qcom,checksum = <0x7F72>; + qcom,gui-version = "PM8150GUI - 1.0.0.10"; qcom,fg-profile-data = [ - 09 00 31 EA - 85 C4 5A BA - 33 AA 00 00 - EE BC 66 8B - F6 87 AC 95 - 78 9A D2 87 - 2F 00 6C 0C - EF 02 B0 04 - 41 02 CE 07 - 00 00 A6 00 - 70 07 CD 06 - 64 14 66 25 - 55 1C 06 0A - E8 3A C0 43 - 40 00 37 00 - 40 00 4D 00 - 41 00 33 00 - 38 00 3B 00 - 43 00 4C 00 - 40 00 40 00 - 3E 00 3B 00 - 34 00 32 00 - 2F 00 56 00 - 4A 64 42 00 - 49 00 40 08 - 40 00 36 00 - 36 00 40 10 - 3C 10 36 00 - 64 28 4A 48 - 3E 60 39 0C - 40 00 D8 00 - 66 20 C1 0C - C7 02 61 FE - 2A 1C EA 0B - 5A 0D A2 22 - F6 17 3A 42 - 2E 55 7F 02 - 71 13 47 20 - B9 04 2E 0B - 9E 05 D4 1C - D5 03 F7 05 - 4D 02 8C 18 - D4 22 B5 45 - 90 52 7F 14 - AE 20 60 04 - 68 CB 66 AD - DC 1C 7C D1 - B1 05 D3 BA - 78 18 AA 8A - FD 85 18 92 - 8F A0 09 80 - 43 00 82 FC - 33 03 25 02 - 00 F8 FE EC - F4 DB F1 F7 - 68 0B EA 14 - 71 20 20 18 - A6 1E BD 03 - C8 05 54 01 + 09 00 53 00 + 77 DD 12 E2 + E4 DD 00 00 + 49 BC 7B 8B + F9 87 5A 9A + A8 86 C3 87 + 29 00 43 FC + C9 12 DC 04 + 75 FB CE 07 + 32 00 43 EB + 7A ED B9 B5 + EF 0A 
C4 E2 + 2B BC EF 0B + 40 02 5C DB + 60 00 46 00 + 49 00 48 00 + 3B 00 30 00 + 30 00 37 00 + 41 00 43 00 + 45 00 60 00 + 40 00 3A 00 + 36 00 36 00 + 33 00 5B 00 + 4F 64 4A 00 + 4F 08 4F 08 + 60 F8 4D 00 + 4A 00 5A 08 + 59 08 4E 00 + 93 20 6A 40 + 59 58 52 10 + 59 00 D8 08 + 6A 21 E7 0D + 42 03 1F FC + 5B 1C 1C 03 + 3A 04 8B 23 + FB 17 7A 3B + B1 4C 31 02 + 85 15 3A 21 + DB 0D E5 0B + F7 04 E0 1C + 4D FB F6 04 + 8B 03 7E 18 + 9C 22 29 3C + E3 4B 8C 16 + F6 20 FE ED + C5 D3 01 D5 + D5 1C 03 CB + AF 05 DD BA + 60 18 87 92 + 73 84 70 9B + 8F 98 09 80 + 67 FA 62 05 + 58 03 C9 04 + 00 00 1A E4 + C1 02 E9 0F + E0 EB 80 A2 + 6C 1F 1C 10 + 93 04 29 02 + 8D 04 6F 02 CE 07 32 00 - 15 03 4D 02 - 90 02 35 04 - 01 03 1D 03 - 47 03 DB 02 - 52 05 40 00 - 3B 00 3E 00 - 3F 64 40 00 - 41 00 3E 08 - 43 F8 40 00 - 40 08 40 10 - 40 10 36 00 - 3F 28 42 48 - 42 60 47 0C - 38 00 39 00 - 3F 08 40 00 - 40 00 40 00 - 2F 10 3D 10 - 3B 00 42 20 - 53 40 37 58 - 3D 0E 41 00 - 40 00 40 08 - D8 00 28 20 - 94 05 44 0A - 23 0D 8F 1C - 08 22 3D 45 - C4 52 4F 18 - 41 02 26 05 - 86 02 6A 11 - 3F 0A EA 1F - 59 05 BC 02 - FA 05 A2 1C - 77 03 92 05 - A5 02 84 18 - A0 03 34 04 - C6 02 67 00 - E9 1F 2B 05 - FE 02 A8 05 - C4 1C 83 02 - F1 04 66 03 - A6 18 FC 02 - 65 05 28 03 - 71 00 61 01 + C3 01 CA 02 + 6F 07 4A 03 + 03 05 EC 04 + 3E 04 71 04 + CD 02 4A 00 + 3D 00 40 00 + 42 64 44 00 + 43 00 47 08 + 44 00 4B 00 + 50 00 4F 10 + 46 10 3C 00 + 45 20 48 40 + 48 58 4D 0E + 48 00 40 00 + 46 08 5E 00 + 61 00 48 00 + 43 08 59 08 + 5B 00 5F 20 + 74 40 5F 50 + 53 10 5B 00 + 70 00 E6 08 + D8 00 DD 20 + 9A 04 2B 0B + 97 0D C7 1C + 55 23 E3 45 + 1B 52 89 18 + B9 03 18 04 + DB 02 74 12 + 3F 0A D4 20 + 4A 04 8A 03 + 32 05 C8 1C + DF 02 66 04 + C4 03 A7 18 + 2F 03 10 05 + C6 03 76 00 + D0 20 31 04 + AA 03 0E 05 + D3 1C 33 02 + A2 05 91 02 + AB 18 D6 02 + A7 05 B2 02 + 7F 00 7F 01 C0 00 FA 00 - 38 0D 00 00 + F2 0C 00 00 ]; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi 
b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi index f6eafeea4ee20d73411936590b0c4f4d0296a0c9..4e47fb8be2d53ee2d9c4f9a7b6bb30d83e3a27ef 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi @@ -37,14 +37,14 @@ #address-cells = <1>; ranges; interrupts = , + , + , , , , , , - , - , - ; + ; gfx_0_tbu: gfx_0_tbu@0x50c5000 { compatible = "qcom,qsmmuv500-tbu"; @@ -301,3 +301,15 @@ dma-coherent; }; }; + +&kgsl_smmu { + qcom,actlr = + /* All CBs of GFX: +15 deep PF */ + <0x0 0x7ff 0x303>; +}; + +&apps_smmu { + qcom,actlr = + /* HF and SF TBUs: +3 deep PF */ + <0x800 0x7ff 0x103>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi index c30e790780acb40a1347e76ca419cc97b3adbe47..737111fc0eba16d0ec82809e18fb7fae74872762 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi @@ -70,7 +70,6 @@ #iommu-cells = <2>; qcom,skip-init; qcom,use-3-lvl-tables; - qcom,disable-atos; #global-interrupts = <1>; #size-cells = <1>; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi index c9b0b4a918ec852bf04220e681304ffda4909fd5..08ea46eb3b49f63cfa22a53324c9c19351a6c4a4 100644 --- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi @@ -45,6 +45,10 @@ qcom,latency-level = "ultra"; }; + trans_loopback: qcom,msm-transcode-loopback { + compatible = "qcom,msm-transcode-loopback"; + }; + compress: qcom,msm-compress-dsp { compatible = "qcom,msm-compress-dsp"; }; diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi index 5da6364a2815e47a2885404910122f6ac1401afb..973a1ffa839ef3000aa013579868f0748b530e41 100644 --- a/arch/arm64/boot/dts/qcom/pm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi @@ -55,7 +55,8 @@ pm6150_vadc: vadc@3100 
{ compatible = "qcom,spmi-adc5"; - reg = <0x3100 0x100>; + reg = <0x3100 0x100>, <0x3700 0x100>; + reg-names = "adc5-usr-base", "adc5-cal-base"; #address-cells = <1>; #size-cells = <0>; interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>; @@ -63,6 +64,7 @@ qcom,adc-vdd-reference = <1875>; #io-channel-cells = <1>; io-channel-ranges; + qcom,pmic-revid = <&pm6150_revid>; /* Channel nodes */ ref_gnd { @@ -116,8 +118,6 @@ chg_temp { reg = ; label = "chg_temp"; - qcom,ratiometric; - qcom,hw-settle-time = <200>; qcom,pre-scaling = <1 1>; }; diff --git a/arch/arm64/boot/dts/qcom/pm8009.dtsi b/arch/arm64/boot/dts/qcom/pm8009.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..2b52dfd21563347f45e5e8a1263fdbc1865e1690 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/pm8009.dtsi @@ -0,0 +1,52 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&spmi_bus { + qcom,pm8009@a { + compatible ="qcom,spmi-pmic"; + reg = <0xa SPMI_USID>; + #address-cells = <1>; + #size-cells = <1>; + + qcom,revid@100 { + compatible = "qcom,qpnp-revid"; + reg = <0x100 0x100>; + }; + + pm8009_pon: qcom,power-on@800 { + compatible = "qcom,qpnp-power-on"; + reg = <0x800 0x100>; + }; + + pm8009_gpios: pinctrl@c000 { + compatible = "qcom,spmi-gpio"; + reg = <0xc000 0x400>; + interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>, + <0x0 0xc1 0 IRQ_TYPE_NONE>, + <0x0 0xc3 0 IRQ_TYPE_NONE>; + interrupt-names = "pm8009_gpio1", "pm8009_gpio2", + "pm8009_gpio4"; + gpio-controller; + #gpio-cells = <2>; + qcom,gpios-disallowed = <3>; + }; + }; + + qcom,pm8009@b { + compatible = "qcom,spmi-pmic"; + reg = <0xb SPMI_USID>; + #address-cells = <1>; + #size-cells = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi index a1e857558625109ac1e415e4640dd8b05a0d3693..36ba994643f825f5d2b9db0864a6602db17ac0e6 100644 --- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi @@ -184,8 +184,8 @@ <0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>, <0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>, <0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x6 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x10 0x7 IRQ_TYPE_LEVEL_HIGH>; + <0x2 0x10 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x10 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "chgr-error", "chg-state-change", @@ -199,13 +199,13 @@ qcom,dcdc@1100 { reg = <0x1100 0x100>; interrupts = - <0x2 0x11 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x1 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x11 0x5 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x11 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x7 IRQ_TYPE_LEVEL_HIGH>; + <0x2 0x11 0x0 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 
0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "otg-fail", "otg-oc-disable-sw", @@ -220,12 +220,12 @@ reg = <0x1200 0x100>; interrupts = <0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x12 0x2 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x12 0x3 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x12 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x12 0x5 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x12 0x6 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x12 0x7 IRQ_TYPE_LEVEL_HIGH>; + <0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "bat-temp", "bat-ov", @@ -239,12 +239,12 @@ qcom,usb@1300 { reg = <0x1300 0x100>; interrupts = - <0x2 0x13 0x0 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x13 0x1 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x13 0x2 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x13 0x3 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x13 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x13 0x5 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x5 IRQ_TYPE_EDGE_BOTH>, <0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>, <0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>; @@ -261,13 +261,13 @@ qcom,dc@1400 { reg = <0x1400 0x100>; interrupts = - <0x2 0x14 0x1 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x2 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x3 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x5 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x6 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x14 0x7 IRQ_TYPE_LEVEL_HIGH>; + <0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "dcin-vashdn", "dcin-uv", @@ -282,12 +282,12 @@ reg = <0x1500 0x100>; interrupts = <0x2 0x15 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 
0x15 0x1 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x1 IRQ_TYPE_EDGE_BOTH>, <0x2 0x15 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x3 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x3 IRQ_TYPE_EDGE_BOTH>, <0x2 0x15 0x4 IRQ_TYPE_EDGE_RISING>, <0x2 0x15 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x6 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x6 IRQ_TYPE_EDGE_BOTH>, <0x2 0x15 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "typec-or-rid-detect-change", @@ -305,11 +305,11 @@ interrupts = <0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>, <0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x2 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x16 0x3 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x16 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x16 0x6 IRQ_TYPE_EDGE_FALLING>, - <0x2 0x16 0x7 IRQ_TYPE_EDGE_RISING>; + <0x2 0x16 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x16 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x16 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x16 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x16 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "wdog-snarl", "wdog-bark", diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts index f0b91c4cf7df25be98fea7c516759d8e23f56d8a..a9ad2c5e99211c5f197bd74df618e1d530f495a7 100644 --- a/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts @@ -14,7 +14,7 @@ /dts-v1/; #include "qcs403.dtsi" -#include "qcs405-wsa-audio-overlay.dtsi" +#include "qcs405-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. 
QCS403 EVB2 1000 IOT"; @@ -77,3 +77,7 @@ /delete-node/ cpuss-2-step; /delete-node/ cpuss-3-step; }; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts index 640f251238df498e35ea4902d4ce71e1c8952889..38f8a9c8424075f42cd0f6ca5025a4a5503e3aeb 100644 --- a/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts @@ -89,3 +89,7 @@ &usb2_phy1 { status = "disabled"; }; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku3.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku3.dts new file mode 100644 index 0000000000000000000000000000000000000000..852276034c230977694cf33b617b7f5daa5af3d5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku3.dts @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs403.dtsi" +#include "qcs405-audio-overlay.dtsi" +/ { + model = "Qualcomm Technologies, Inc. 
QCS403 SSRD IOT"; + compatible = "qcom,qcs403-iot", "qcom,qcs403", "qcom,iot"; + qcom,board-id = <0x010020 0x4>; + + cpus { + /delete-node/ cpu@102; + /delete-node/ cpu@103; + + cpu-map { + cluster0 { + /delete-node/ core2; + /delete-node/ core3; + }; + }; + }; +}; + +&soc { + cpuss_dump { + /delete-node/ qcom,l1_i_cache102; + /delete-node/ qcom,l1_i_cache103; + /delete-node/ qcom,l1_d_cache102; + /delete-node/ qcom,l1_d_cache103; + }; + + qcom,spm@b012000 { + qcom,cpu-vctl-list = <&CPU0 &CPU1>; + }; + + qcom,lpm-levels { + qcom,pm-cluster@0{ + qcom,pm-cpu { + qcom,cpu = <&CPU0 &CPU1>; + }; + }; + }; + + /delete-node/ cti@61ba000; + /delete-node/ cti@61bb000; + /delete-node/ etm@61be000; + /delete-node/ etm@61bf000; + funnel@61a1000 { + ports { + /delete-node/ port@3; + /delete-node/ port@4; + }; + }; +}; + +&thermal_zones { + cpuss-max-step { + cooling-maps { + /delete-node/ cpu2_cdev; + /delete-node/ cpu3_cdev; + }; + }; + + /delete-node/ cpuss-2-step; + /delete-node/ cpuss-3-step; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi index 2a9453224eb0b4eecc5bf5408b94660b8b6f6f55..69683700a14e351777911a717476080c0cc56498 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi @@ -48,8 +48,8 @@ status = "okay"; compatible = "explore,ep92a6"; reg = <0x64>; - interrupt-parent = <&tlmm>; - interrupts = <107 IRQ_TYPE_EDGE_FALLING>; + pinctrl-names = "default"; + pinctrl-0 = <&ep_reset_n_active &ep_mute_active &ep_int_active>; }; }; @@ -61,7 +61,7 @@ qcom,auxpcm-audio-intf = <1>; qcom,spdif-audio-intf = <1>; qcom,wcn-btfm = <1>; - qcom,msm-mi2s-master = <1>, <0>, <1>, <1>, <1>; + qcom,msm-mi2s-master = <1>, <0>, <1>, <1>, <1>, <1>; qcom,ep92-name = "ep92.3-0064"; qcom,ep92-busnum = <3>; @@ -70,14 +70,14 @@ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, <&loopback>, <&compress>, <&hostless>, <&afe>, <&lsm>, <&routing>, <&compr>, - <&pcm_noirq>; + 
<&pcm_noirq>, <&trans_loopback>; asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1", "msm-pcm-dsp.2", "msm-voip-dsp", "msm-pcm-voice", "msm-pcm-loopback", "msm-compress-dsp", "msm-pcm-hostless", "msm-pcm-afe", "msm-lsm-client", "msm-pcm-routing", "msm-compr-dsp", - "msm-pcm-dsp-noirq"; + "msm-pcm-dsp-noirq", "msm-transcode-loopback"; asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>, <&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>, <&dai_mi2s5>, <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, @@ -164,6 +164,11 @@ }; }; +&dai_mi2s1 { + qcom,msm-mi2s-rx-lines = <0>; + qcom,msm-mi2s-tx-lines = <15>; +}; + &dai_mi2s4 { qcom,msm-dai-is-island-supported = <1>; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi index 28be044c03a5d27edbc6e42d7f9f49c60252faf8..f4f616825cbdae9252a267c3f16d4defed51af03 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi @@ -126,9 +126,9 @@ CPU_COST_0: core-cost0 { busy-cost-data = < 960000 159 - 1113600 207 - 1267200 256 - 1382400 327 + 1094400 207 + 1248000 256 + 1401600 327 >; idle-cost-data = < 100 80 60 40 @@ -137,9 +137,9 @@ CLUSTER_COST_0: cluster-cost0 { busy-cost-data = < 960000 53 - 1113600 61 - 1267200 71 - 1382400 85 + 1094400 61 + 1248000 71 + 1401600 85 >; idle-cost-data = < 4 3 2 1 diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi index 1ad579c963cb396eb7f51e9258704f54578e3025..b424612259c85396d0148b4226febeeac87b471f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi @@ -55,9 +55,7 @@ pinctrl-names = "default"; pinctrl-0 = <&spdifrx_opt_default &pri_mi2s_sck_active &pri_mi2s_ws_active - &pri_mi2s_sd0_active &pri_mi2s_sd1_active - &pri_mi2s_sd2_active &pri_mi2s_sd3_active - &pri_mi2s_sd4_active &pri_mi2s_sd5_active + &pri_mi2s_sd0_active &sec_mi2s_sck_active &sec_mi2s_ws_active 
&sec_mi2s_sd0_active &sec_mi2s_sd1_active &sec_mi2s_sd2_active &sec_mi2s_sd3_active>; @@ -67,6 +65,10 @@ qcom,num-macros = <1>; }; +&dai_mi2s0 { + qcom,msm-mi2s-rx-lines = <0x1>; +}; + &soc { cdc_dmic01_gpios: cdc_dmic01_pinctrl { compatible = "qcom,msm-cdc-pinctrl"; diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra1.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra1.dtsi index 77ee299509a56b801fd3fdaf8f9b3635e1be1ea4..b1724376fabdbc60ceb5ddb1204d30fca0b665c4 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra1.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra1.dtsi @@ -12,6 +12,7 @@ */ &i2c_5 { + qcom,clk-freq-out = <100000>; status = "okay"; /* CSRA66X0 cluster GRP_0 */ csra66x0_ampl_68: csra66x0@68 { diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi index 76b39a57b791b2c07752b720db1f5c944ee989e0..75eb49722c96d2fd7f136eb87d64b5d74d936529 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi @@ -23,11 +23,11 @@ asoc-codec = <&stub_codec>, <&bolero>; asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; qcom,csra-max-devs = <6>; - qcom,csra-devs = <&csra66x0_ampl_68>, <&csra66x0_ampl_69>, - <&csra66x0_ampl_6A>, <&csra66x0_ampl_6B>, - <&csra66x0_ampl_68_1>, <&csra66x0_ampl_69_1>; - qcom,csra-aux-dev-prefix = "CSRA_12", "CSRA_34", "CSRA_56", - "CSRA_78", "CSRA_9A", "CSRA_BC"; + qcom,csra-devs = <&csra66x0_ampl_6B>, <&csra66x0_ampl_6A>, + <&csra66x0_ampl_69>, <&csra66x0_ampl_68>, + <&csra66x0_ampl_69_1>, <&csra66x0_ampl_68_1>; + qcom,csra-aux-dev-prefix = "CSRA_78", "CSRA_56", "CSRA_34", + "CSRA_12", "CSRA_BC", "CSRA_9A"; qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; @@ -75,6 +75,10 @@ qcom,num-macros = <1>; }; +&dai_mi2s0 { + qcom,msm-mi2s-rx-lines = <0x3f>; +}; + &soc { cdc_dmic01_gpios: cdc_dmic01_pinctrl { compatible = 
"qcom,msm-cdc-pinctrl"; diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra6.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra6.dtsi index 6f9018299d7e0547524efda1d2da879d752a2c28..5e9664fe60cffedafdf6946a4eea0ce3aaf3291c 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-csra6.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-csra6.dtsi @@ -12,6 +12,7 @@ */ &i2c_5 { + qcom,clk-freq-out = <100000>; status = "okay"; /* CSRA66X0 cluster GRP_0 */ csra66x0_ampl_68: csra66x0@68 { diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku10.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku10.dts new file mode 100644 index 0000000000000000000000000000000000000000..7e6e8cb84acdb9a8066d7e99b6f078e2f37b48c8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku10.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-tdm-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS405 EVB1 4000 TDM Mic"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x070020 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts index bbfce94ca0d88eb357268c2ee9aa7dbd342bad22..6bc5d84eb92214e68ef2e2031f026dbe89e49326 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts @@ -25,6 +25,21 @@ #include "qcs405-mdss-panels.dtsi" +&mdss_hdmi_tx { + pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active", + "hdmi_active", "hdmi_sleep"; + pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; + pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>; + pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>; + pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>; + pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; +}; + &mdss_mdp { qcom,mdss-pref-prim-intf = "dsi"; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts index 980bca7ed64492c59c02190c3b7f90236a374563..44d766243e4c52f290551c1bd3528d7c3a3ab7c1 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku9.dts @@ -23,6 +23,33 @@ qcom,board-id = <0x010015 0x0>; }; +#include "qcs405-mdss-panels.dtsi" + +&mdss_mdp { + qcom,mdss-pref-prim-intf = "dsi"; +}; + +&mdss_dsi { + hw-config = "single_dsi"; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_hx8394d_720_vid>; + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; + pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; + + qcom,platform-te-gpio = <&tlmm 41 0>; + qcom,platform-reset-gpio = <&tlmm 39 0>; + 
qcom,platform-bklight-en-gpio = <&tlmm 48 0>; +}; + +&dsi_hx8394d_720_vid { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_tlmm_gpio"; + qcom,mdss-dsi-bl-pmic-bank-select = <0>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + &smb1351_otg_supply { status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi index f5470864740fe0610084a32c5c14b28e7be88f15..8db0fcd2312964748be0281223e82e131eb2be79 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi @@ -245,6 +245,185 @@ }; }; + quin_mi2s_sck { + quin_mi2s_sck_sleep: quin_mi2s_sck_sleep { + mux { + pins = "gpio8"; + function = "func2"; + }; + + config { + pins = "gpio8"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_sck_active: quin_mi2s_sck_active { + mux { + pins = "gpio8"; + function = "func2"; + }; + + config { + pins = "gpio8"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quin_mi2s_ws { + quin_mi2s_ws_sleep: quin_mi2s_ws_sleep { + mux { + pins = "gpio9"; + function = "func2"; + }; + + config { + pins = "gpio9"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_ws_active: quin_mi2s_ws_active { + mux { + pins = "gpio9"; + function = "func2"; + }; + + config { + pins = "gpio9"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quin_mi2s_sd0 { + quin_mi2s_sd0_sleep: quin_mi2s_sd0_sleep { + mux { + pins = "gpio10"; + function = "func2"; + }; + + config { + pins = "gpio10"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_sd0_active: quin_mi2s_sd0_active { + mux { + pins = "gpio10"; + function = "func2"; + }; + + config { + pins = "gpio10"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; 
+ }; + }; + }; + + quin_mi2s_sd1 { + quin_mi2s_sd1_sleep: quin_mi2s_sd1_sleep { + mux { + pins = "gpio11"; + function = "func2"; + }; + + config { + pins = "gpio11"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_sd1_active: quin_mi2s_sd1_active { + mux { + pins = "gpio11"; + function = "func2"; + }; + + config { + pins = "gpio11"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + quin_mi2s_sd2 { + quin_mi2s_sd2_sleep: quin_mi2s_sd2_sleep { + mux { + pins = "gpio12"; + function = "func2"; + }; + + config { + pins = "gpio12"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_sd2_active: quin_mi2s_sd2_active { + mux { + pins = "gpio12"; + function = "func2"; + }; + + config { + pins = "gpio12"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quin_mi2s_sd3 { + quin_mi2s_sd3_sleep: quin_mi2s_sd3_sleep { + mux { + pins = "gpio13"; + function = "func2"; + }; + + config { + pins = "gpio13"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + quin_mi2s_sd3_active: quin_mi2s_sd3_active { + mux { + pins = "gpio13"; + function = "func2"; + }; + + config { + pins = "gpio13"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + wsa_swr_clk_pin { wsa_swr_clk_sleep: wsa_swr_clk_sleep { mux { diff --git a/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi b/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi index 31243496e78bda6a3bbe33e191b5f1f12962bb85..06e7d18e97bd87b8daf128134000ad901aea561f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi @@ -83,4 +83,44 @@ }; }; }; + + mdss_hdmi_pll: qcom,mdss_hdmi_pll@0x1aa0600 { + compatible = "qcom,mdss_hdmi_pll_28lpm"; + label = "MDSS HDMI PLL"; + cell-index = <0>; + #clock-cells = 
<1>; + + reg = <0x1aa0600 0x49c>, + <0x0184d074 0x8>; + reg-names = "pll_base", "gdsc_base"; + + gdsc-supply = <&gdsc_mdss>; + vddx-pll-supply = <&pms405_l5>; + + clocks = <&clock_gcc GCC_MDSS_AHB_CLK>; + clock-names = "iface_clk"; + clock-rate = <0>; + + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + qcom,platform-supply-entry@1 { + reg = <1>; + qcom,supply-name = "vddx-pll"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <14300>; + qcom,supply-disable-load = <1>; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-mdss.dtsi b/arch/arm64/boot/dts/qcom/qcs405-mdss.dtsi index 09ad58f684ac97a255791bc09f2fe93b6896c3c1..e2477b7867c87983d3993dd9f42422343dfe3a6e 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-mdss.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-mdss.dtsi @@ -193,12 +193,13 @@ compatible = "qcom,mdss-fb"; }; - mdss_fb1: qcom,mdss_fb_wfd { + mdss_fb1: qcom,mdss_fb_hdmi { cell-index = <1>; compatible = "qcom,mdss-fb"; + qcom,mdss-intf = <&mdss_hdmi_tx>; }; - mdss_fb2: qcom,mdss_fb_secondary { + mdss_fb2: qcom,mdss_fb_wfd { cell-index = <2>; compatible = "qcom,mdss-fb"; }; @@ -242,8 +243,6 @@ qcom,mmss-phyreset-ctrl-offset = <0x24>; qcom,mdss-fb-map-prim = <&mdss_fb0>; - qcom,mdss-fb-map-sec = <&mdss_fb2>; - /*qcom,mdss-fb-map = <&mdss_fb0>;*/ qcom,core-supply-entries { #address-cells = <1>; #size-cells = <0>; @@ -322,11 +321,54 @@ }; }; + msm_ext_disp: qcom,msm_ext_disp { + compatible = "qcom,msm-ext-disp"; + + ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx { + compatible = "qcom,msm-ext-disp-audio-codec-rx"; + qcom,msm_ext_disp = <&msm_ext_disp>; + }; + }; + + mdss_hdmi_tx: qcom,hdmi_tx@1aa0000 { + cell-index = <0>; + compatible = "qcom,hdmi-tx"; 
+ + reg = <0x1aa0000 0x50c>, + <0xa0000 0x6400>, + <0x1ae0000 0x28>; + reg-names = "core_physical", "qfprom_physical", "hdcp_physical"; + + hpd-gdsc-supply = <&gdsc_mdss>; + + qcom,supply-names = "hpd-gdsc"; + qcom,min-voltage-level = <0>; + qcom,max-voltage-level = <0>; + qcom,enable-load = <0>; + qcom,disable-load = <0>; + + qcom,msm_ext_disp = <&msm_ext_disp>; + + clocks = <&clock_gcc GCC_MDSS_AHB_CLK>, + <&clock_gcc_mdss MDSS_MDP_VOTE_CLK>, + <&clock_gcc GCC_MDSS_HDMI_APP_CLK>, + <&clock_gcc GCC_MDSS_HDMI_PCLK_CLK>, + <&clock_gcc HDMI_PCLK_CLK_SRC>, + <&mdss_hdmi_pll HDMI_PCLK_SRC>; + + clock-names = "hpd_iface_clk", "hpd_mdp_core_clk", + "hpd_core_clk", "core_extp_clk", + "hdmi_pclk_rcg", "ext_hdmi_pixel_clk"; + + qcom,mdss-fb-map = <&mdss_fb1>; + qcom,pluggable; + }; + qcom,mdss_wb_panel { compatible = "qcom,mdss_wb"; qcom,mdss_pan_res = <640 640>; qcom,mdss_pan_bpp = <24>; - qcom,mdss-fb-map = <&mdss_fb1>; + qcom,mdss-fb-map = <&mdss_fb2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi index cd961550d9808ccd24318a629a9fe599b634d341..eb8e248170af8cd3ce1bc9367c24c6deb066b02c 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi @@ -686,6 +686,110 @@ }; }; + mdss_hdmi_5v_active: mdss_hdmi_5v_active { + mux { + pins = "gpio109"; + function = "gpio"; + }; + + config { + pins = "gpio109"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + mdss_hdmi_5v_suspend: mdss_hdmi_5v_suspend { + mux { + pins = "gpio109"; + function = "gpio"; + }; + + config { + pins = "gpio109"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_hdmi_hpd_active: mdss_hdmi_hpd_active { + mux { + pins = "gpio106"; + function = "hdmi_hot"; + }; + + config { + pins = "gpio106"; + bias-pull-down; + drive-strength = <16>; + }; + }; + + mdss_hdmi_hpd_suspend: mdss_hdmi_hpd_suspend { + mux { + pins = "gpio106"; + function = "hdmi_hot"; + }; + + config { + pins = 
"gpio106"; + bias-pull-down; + drive-strength = <2>; + }; + }; + + mdss_hdmi_ddc_active: mdss_hdmi_ddc_active { + mux { + pins = "gpio15", "gpio16"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio15", "gpio16"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_ddc_suspend: mdss_hdmi_ddc_suspend { + mux { + pins = "gpio15", "gpio16"; + function = "hdmi_ddc"; + }; + + config { + pins = "gpio15", "gpio16"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_cec_active: mdss_hdmi_cec_active { + mux { + pins = "gpio14"; + function = "hdmi_cec"; + }; + + config { + pins = "gpio14"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + mdss_hdmi_cec_suspend: mdss_hdmi_cec_suspend { + mux { + pins = "gpio14"; + function = "hdmi_cec"; + }; + + config { + pins = "gpio14"; + drive-strength = <2>; + bias-pull-up; + }; + }; + pmx_mdss: pmx_mdss { mdss_dsi_active: mdss_dsi_active { mux { @@ -1161,6 +1265,66 @@ }; }; + pri_mi2s_sd6 { + pri_mi2s_sd6_sleep: pri_mi2s_sd6_sleep { + mux { + pins = "gpio95"; + function = "i2s_1"; + }; + + config { + pins = "gpio95"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd6_active: pri_mi2s_sd6_active { + mux { + pins = "gpio95"; + function = "i2s_1"; + }; + + config { + pins = "gpio95"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_sd7 { + pri_mi2s_sd7_sleep: pri_mi2s_sd7_sleep { + mux { + pins = "gpio96"; + function = "i2s_1"; + }; + + config { + pins = "gpio96"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd7_active: pri_mi2s_sd7_active { + mux { + pins = "gpio96"; + function = "i2s_1"; + }; + + config { + pins = "gpio96"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + pca9956b_reset_gpio: pca9956b_reset_gpio { mux { pins = "gpio95"; @@ -1350,7 +1514,7 @@ /* SPDIF optical 
input pin */ spdifrx_opt { - spdifrx_opt_default: spdifrx_opt_default{ + spdifrx_opt_default: spdifrx_opt_default { mux { pins = "gpio119"; function = "spdifrx_opt"; @@ -1365,6 +1529,95 @@ }; }; + /* EP92 HDMI pins */ + ep_reset_n { + ep_reset_n_sleep: ep_reset_n_sleep { + mux { + pins = "gpio108"; + function = "gpio"; + }; + + config { + pins = "gpio108"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + output-high; + }; + }; + + ep_reset_n_active: ep_reset_n_active { + mux { + pins = "gpio108"; + function = "gpio"; + }; + + config { + pins = "gpio108"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + output-high; + }; + }; + }; + + ep_mute { + ep_mute_sleep: ep_mute_sleep { + mux { + pins = "gpio104"; + function = "gpio"; + }; + + config { + pins = "gpio104"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + + ep_mute_active: ep_mute_active { + mux { + pins = "gpio104"; + function = "gpio"; + }; + + config { + pins = "gpio104"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + }; + }; + }; + + ep_int { + ep_int_sleep: ep_int_sleep { + mux { + pins = "gpio107"; + function = "gpio"; + }; + + config { + pins = "gpio107"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + + ep_int_active: ep_int_active { + mux { + pins = "gpio107"; + function = "gpio"; + }; + + config { + pins = "gpio107"; + drive-strength = <8>; /* 8 mA */ + bias-disable; + }; + }; + }; + ir_in { ir_in_default: ir_in_default { mux { diff --git a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi index ca736f3c65dd0eabc8dfe94e3c9da341c83b2d35..9bfd179f96af156fd83c6a1b54f177cc8a92bc2b 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi @@ -299,7 +299,7 @@ qcom,corner2-reg-config = /* NOM => INT2 */ - < 3 0x1041041>, < 4 0x41>, + < 3 0x1041040>, < 4 0x41>, /* NOM => NOM */ <(-1) (-1)>, <(-1) (-1)>; }; diff --git 
a/arch/arm64/boot/dts/qcom/qcs405-tdm-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-tdm-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ab43fd8efff9357c870ac9ae77c123632de7c947 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-tdm-audio-overlay.dtsi @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405-tasha.dtsi" +#include "qcs405-va-bolero.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-tdm-snd-card"; + qcom,va-bolero-codec = <1>; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,quin-mi2s-gpios = <&cdc_quin_mi2s_gpios>; + tdm-vdd-micb-supply = <&pms405_l7>; + qcom,tdm-vdd-micb-voltage = <1800000 1800000>; + qcom,tdm-vdd-micb-current = <13000>; + qcom,audio-routing = + "RX_BIAS", "MCLK", + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4", + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7"; + pinctrl-names = 
"default"; + pinctrl-0 = <&spdifrx_opt_default>; +}; + +&bolero { + qcom,num-macros = <1>; +}; + +&tdm_quin_tx { + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; +}; + +&soc { + cdc_quin_mi2s_gpios: msm_cdc_pinctrl_quin { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&quin_mi2s_sck_active &quin_mi2s_ws_active + &quin_mi2s_sd0_active &quin_mi2s_sd1_active + &quin_mi2s_sd2_active &quin_mi2s_sd3_active>; + pinctrl-1 = <&quin_mi2s_sck_sleep &quin_mi2s_ws_sleep + &quin_mi2s_sd0_sleep &quin_mi2s_sd1_sleep + &quin_mi2s_sd2_sleep &quin_mi2s_sd3_sleep>; + qcom,lpi-gpios; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 14e5dc1c87beea85979e4bd7cfb689b63537d3f6..fd78a51bc15efe39bb56c2cf2b4d0f7ecf4cf457 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -39,10 +39,10 @@ #size-cells = <2>; ranges; - removed_region0: removed_region@85800000 { + removed_region0: removed_region@85a00000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x85800000 0x0 0x700000>; + reg = <0x0 0x85a00000 0x0 0x500000>; }; smem_region: smem@85f00000 { @@ -1053,9 +1053,9 @@ }; }; - qcom_seecom: qseecom@85800000 { + qcom_seecom: qseecom@85a00000 { compatible = "qcom,qseecom"; - reg = <0x85800000 0x600000>; + reg = <0x85a00000 0x400000>; reg-names = "secapp-region"; qcom,hlos-num-ce-hw-instances = <1>; qcom,hlos-ce-hw-instance = <0>; @@ -1068,6 +1068,43 @@ qcom,qsee-reentrancy-support = <2>; }; + qcom_rng: qrng@e3000 { + compatible = "qcom,msm-rng"; + reg = <0xe3000 0x1000>; + qcom,msm-rng-iface-clk; + qcom,no-qrng-config; + qcom,msm-bus,name = "msm-rng-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 618 0 0>, /* No vote */ + <1 618 0 800>; /* 100 MB/s */ + clocks = <&clock_gcc GCC_PRNG_AHB_CLK>; + clock-names 
= "iface_clk"; + }; + + sdcc1_ice: sdcc1ice@7808000 { + compatible = "qcom,ice"; + reg = <0x7808000 0x8000>; + qcom,enable-ice-clk; + clock-names = "ice_core_clk_src", "ice_core_clk", + "bus_clk", "iface_clk"; + clocks = <&clock_gcc SDCC1_ICE_CORE_CLK_SRC>, + <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>, + <&clock_gcc GCC_SDCC1_AHB_CLK>, + <&clock_gcc GCC_SDCC1_APPS_CLK>; + qcom,op-freq-hz = <266666667>, <0>, <0>, <0>; + qcom,msm-bus,name = "sdcc_ice_noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 757 0 0>, /* No vote */ + <1 757 1000 0>; /* Max. bandwidth */ + qcom,bus-vector-names = "MIN", + "MAX"; + qcom,instance-type = "sdcc"; + }; + sdhc_1: sdhci@7804000 { compatible = "qcom,sdhci-msm-v5"; reg = <0x7804000 0x1000>, <0x7805000 0x1000>; @@ -1075,6 +1112,7 @@ interrupts = <0 123 0>, <0 138 0>; interrupt-names = "hc_irq", "pwr_irq"; + sdhc-msm-crypto = <&sdcc1_ice>; qcom,bus-width = <8>; qcom,large-address-bus; @@ -1109,8 +1147,11 @@ qcom,devfreq,freq-table = <50000000 200000000>; clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>, - <&clock_gcc GCC_SDCC1_APPS_CLK>; - clock-names = "iface_clk", "core_clk"; + <&clock_gcc GCC_SDCC1_APPS_CLK>, + <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>; + clock-names = "iface_clk", "core_clk", "ice_core_clk"; + + qcom,ice-clk-rates = <266666667 160000000>; qcom,nonremovable; diff --git a/arch/arm64/boot/dts/qcom/qg-batterydata-alium-3600mah.dtsi b/arch/arm64/boot/dts/qcom/qg-batterydata-alium-3600mah.dtsi index 8435d48e6a06ee9ef057aac3efd257a206e6cd5e..0b6935b6478b8b30b534860586954e173dc4087c 100644 --- a/arch/arm64/boot/dts/qcom/qg-batterydata-alium-3600mah.dtsi +++ b/arch/arm64/boot/dts/qcom/qg-batterydata-alium-3600mah.dtsi @@ -22,15 +22,21 @@ qcom,alium_860_89032_0000_3600mAh { qcom,battery-type = "Alium_860_89032_0000_3600mAh_Jun15th2018"; qcom,qg-batt-profile-ver = <100>; - qcom,jeita-fcc-ranges = <0 100 2500000 - 110 400 5400000 + qcom,jeita-fcc-ranges = <0 50 2500000 + 60 400 5400000 410 450 
2500000>; - qcom,jeita-fv-ranges = <0 100 4250000 - 110 400 4350000 + qcom,jeita-fv-ranges = <0 50 4250000 + 60 400 4350000 410 450 4250000>; - qcom,step-chg-ranges = <3600000 4200000 5400000 - 4201000 4300000 3600000 + qcom,step-chg-ranges = <3600000 3800000 5400000 + 3801000 4300000 3600000 4301000 4350000 2500000>; + qcom,ocv-based-step-chg; + + /* COOL = 5 DegC, WARM = 40 DegC */ + qcom,jeita-soft-thresholds = <0x5314 0x25e3>; + /* COLD = 0 DegC, HOT = 45 DegC */ + qcom,jeita-hard-thresholds = <0x58cd 0x20b8>; qcom,fcc1-temp-lut { qcom,lut-col-legend = <0 10 25 40 50>; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi index 973cb497e701ee614882f1271d565e05bd0e02f8..86354035b0e604c4f71d8d29efbe65b320033a9d 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi @@ -9,10 +9,32 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ +#include +#include + + +&qupv3_se6_spi { + status = "ok"; + can-controller@0 { + compatible = "qcom,nxp,mpc5746c"; + reg = <0>; + interrupt-parent = <&tlmm>; + interrupts = <40 0>; + spi-max-frequency = <5000000>; + qcom,clk-freq-mhz = <40000000>; + qcom,max-can-channels = <1>; + qcom,bits-per-word = <8>; + qcom,support-can-fd; + }; +}; &soc { qcom,lpass@62400000 { - status = "disabled"; + status = "ok"; + }; + + audio_apr: qcom,msm-audio-apr { + status = "ok"; }; qcom,glink { @@ -20,6 +42,84 @@ status = "disabled"; }; }; + emac_hw: qcom,emac@20000 { + compatible = "qcom,emac-dwc-eqos"; + qcom,arm-smmu; + reg = <0x20000 0x10000>, + <0x36000 0x100>; + reg-names = "emac-base", "rgmii-base"; + dma-bit-mask = <32>; + emac-core-version = <7>; + interrupts-extended = <&pdc 0 660 4>, <&pdc 0 661 4>, + <&tlmm 121 2>, <&pdc 0 651 4>, + <&pdc 0 652 4>, <&pdc 0 653 4>, + <&pdc 0 654 4>, <&pdc 0 655 4>, + <&pdc 0 656 4>, <&pdc 0 657 4>, + <&pdc 0 658 4>, <&pdc 0 659 4>, + <&pdc 0 668 4>, <&pdc 0 669 4>; + interrupt-names = "sbd-intr", "lpi-intr", + "phy-intr", "tx-ch0-intr", + "tx-ch1-intr", "tx-ch2-intr", + "tx-ch3-intr", "tx-ch4-intr", + "rx-ch0-intr", "rx-ch1-intr", + "rx-ch2-intr", "rx-ch3-intr", + "ptp_pps_irq_0","ptp_pps_irq_1"; + qcom,msm-bus,name = "emac"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <98 512 0 0>, <1 781 0 0>, /* No vote */ + <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + qcom,bus-vector-names = "0", "10", "100", "1000"; + clocks = <&clock_gcc GCC_EMAC_AXI_CLK>, + <&clock_gcc GCC_EMAC_PTP_CLK>, + <&clock_gcc GCC_EMAC_RGMII_CLK>, + <&clock_gcc GCC_EMAC_SLV_AHB_CLK>; + clock-names = "eth_axi_clk", "eth_ptp_clk", + "eth_rgmii_clk", "eth_slave_ahb_clk"; + qcom,phy-reset = <&tlmm 104 GPIO_ACTIVE_HIGH>; + qcom,phy-intr-redirect = <&tlmm 121 GPIO_ACTIVE_LOW>; + gdsc_emac-supply = 
<&emac_gdsc>; + pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", + "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", + "dev-emac-rgmii_txd2_state", "dev-emac-rgmii_txd3_state", + "dev-emac-rgmii_txc_state", "dev-emac-rgmii_tx_ctl_state", + "dev-emac-rgmii_rxd0_state", "dev-emac-rgmii_rxd1_state", + "dev-emac-rgmii_rxd2_state", "dev-emac-rgmii_rxd3_state", + "dev-emac-rgmii_rxc_state", "dev-emac-rgmii_rx_ctl_state", + "dev-emac-phy_intr", "dev-emac-phy_reset_state", + "dev-emac_pin_pps_0"; + + pinctrl-0 = <&emac_mdc>; + pinctrl-1 = <&emac_mdio>; + pinctrl-2 = <&emac_rgmii_txd0>; + pinctrl-3 = <&emac_rgmii_txd1>; + pinctrl-4 = <&emac_rgmii_txd2>; + pinctrl-5 = <&emac_rgmii_txd3>; + pinctrl-6 = <&emac_rgmii_txc>; + pinctrl-7 = <&emac_rgmii_tx_ctl>; + pinctrl-8 = <&emac_rgmii_rxd0>; + pinctrl-9 = <&emac_rgmii_rxd1>; + pinctrl-10 = <&emac_rgmii_rxd2>; + pinctrl-11 = <&emac_rgmii_rxd3>; + pinctrl-12 = <&emac_rgmii_rxc>; + pinctrl-13 = <&emac_rgmii_rx_ctl>; + pinctrl-14 = <&emac_phy_intr>; + pinctrl-15 = <&emac_phy_reset_state>; + pinctrl-16 = <&emac_pin_pps_0>; + + io-macro-info { + io-macro-bypass-mode = <0>; + io-interface = "rgmii"; + }; + emac_emb_smmu: emac_emb_smmu { + compatible = "qcom,emac-smmu-embedded"; + iommus = <&apps_smmu 0x1C0 0x0>; + qcom,iova-mapping = <0x80000000 0x40000000>; + }; + }; }; &ufsphy_mem { @@ -86,8 +186,11 @@ status = "ok"; }; -&usb0 { - dwc3@a600000 { - dr_mode = "peripheral"; - }; +&usb1 { + status = "ok"; + qcom,default-mode-host; +}; + +&qupv3_se0_2uart { + status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sa6155-audio.dtsi b/arch/arm64/boot/dts/qcom/sa6155-audio.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..4f08d6acbdaee27acd9fc710e3a90b920914dd06 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155-audio.dtsi @@ -0,0 +1,505 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + qcom,msm-dai-tdm-pri-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37120>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36864>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36866>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36868>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36870>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-pri-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37121>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>; + qcom,msm-cpudai-tdm-clk-rate = 
<12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36865>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36867>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36869>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36871>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-sec-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37136>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36880>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_rx_1: qcom,msm-dai-q6-tdm-sec-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36882>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_rx_2: qcom,msm-dai-q6-tdm-sec-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36884>; + 
qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_rx_3: qcom,msm-dai-q6-tdm-sec-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36886>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-sec-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37137>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36881 36883 36885 36887>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36881>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_tx_1: qcom,msm-dai-q6-tdm-sec-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36883>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_tx_2: qcom,msm-dai-q6-tdm-sec-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36885>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_sec_tdm_tx_3: qcom,msm-dai-q6-tdm-sec-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36887>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-tert-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37152>; + qcom,msm-cpudai-tdm-group-num-ports = <5>; + qcom,msm-cpudai-tdm-group-port-id = <36896 36898 36900 + 36902 36904>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 { + 
compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36896>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_rx_1: qcom,msm-dai-q6-tdm-tert-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36898>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_rx_2: qcom,msm-dai-q6-tdm-tert-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36900>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_rx_3: qcom,msm-dai-q6-tdm-tert-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36902>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_rx_4: qcom,msm-dai-q6-tdm-tert-rx-4 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36904>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-tert-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37153>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36897 36899 36901 36903>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36897>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_tx_1: qcom,msm-dai-q6-tdm-tert-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36899>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_tx_2: qcom,msm-dai-q6-tdm-tert-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36901>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_tert_tdm_tx_3: qcom,msm-dai-q6-tdm-tert-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = 
<36903>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-quat-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37168>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36912 36914 36916 36918>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36912>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_rx_1: qcom,msm-dai-q6-tdm-quat-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36914>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_rx_2: qcom,msm-dai-q6-tdm-quat-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36916>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_rx_3: qcom,msm-dai-q6-tdm-quat-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36918>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-quat-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37169>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36913 36915 36917 36919>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_quat_tdm_tx_0: qcom,msm-dai-q6-tdm-quat-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36913>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_tx_1: 
qcom,msm-dai-q6-tdm-quat-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36915>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_tx_2: qcom,msm-dai-q6-tdm-quat-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36917>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quat_tdm_tx_3: qcom,msm-dai-q6-tdm-quat-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36919>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-quin-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37184>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36928 36930 36932 36934>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_quin_tdm_rx_0: qcom,msm-dai-q6-tdm-quin-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36928>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_rx_1: qcom,msm-dai-q6-tdm-quin-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36930>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_rx_2: qcom,msm-dai-q6-tdm-quin-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36932>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_rx_3: qcom,msm-dai-q6-tdm-quin-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36934>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-quin-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37185>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36929 36931 36933 36935>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + 
qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <1>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <0>; + dai_quin_tdm_tx_0: qcom,msm-dai-q6-tdm-quin-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36929>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_tx_1: qcom,msm-dai-q6-tdm-quin-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36931>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_tx_2: qcom,msm-dai-q6-tdm-quin-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36933>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_quin_tdm_tx_3: qcom,msm-dai-q6-tdm-quin-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36935>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; +}; + +&audio_apr { + status = "ok"; + q6core: qcom,q6core-audio { + status = "disabled"; + sm6150_snd: sound { + status = "disabled"; + }; + bolero: bolero-cdc { + status = "disabled"; + }; + }; + + sound-adp-star { + status = "ok"; + compatible = "qcom,sa8155-asoc-snd-adp-star"; + qcom,model = "sa8155-adp-star-snd-card"; + qcom,mi2s-audio-intf; + qcom,auxpcm-audio-intf; + qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>; + + asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, + <&loopback>, <&compress>, <&hostless>, + <&afe>, <&lsm>, <&routing>, <&compr>, + <&pcm_noirq>, <&loopback1>, <&pcm_dtmf>; + asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1", + "msm-pcm-dsp.2", "msm-voip-dsp", + "msm-pcm-voice", "msm-pcm-loopback", + "msm-compress-dsp", "msm-pcm-hostless", + "msm-pcm-afe", "msm-lsm-client", + "msm-pcm-routing", "msm-compr-dsp", + "msm-pcm-dsp-noirq", "msm-pcm-loopback.1", + "msm-pcm-dtmf"; + asoc-cpu = <&dai_hdmi>, <&dai_dp>, + <&dai_mi2s0>, <&dai_mi2s1>, + <&dai_mi2s2>, <&dai_mi2s3>, + <&dai_mi2s4>, 
<&dai_pri_auxpcm>, + <&dai_sec_auxpcm>, <&dai_tert_auxpcm>, + <&dai_quat_auxpcm>, <&dai_quin_auxpcm>, + <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, + <&afe_proxy_tx>, <&incall_record_rx>, + <&incall_record_tx>, <&incall_music_rx>, + <&incall_music_2_rx>, + <&usb_audio_rx>, <&usb_audio_tx>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>, + <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>, + <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, + <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, + <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, + <&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>, + <&dai_tert_tdm_rx_4>, <&dai_tert_tdm_tx_0>, + <&dai_tert_tdm_tx_1>, <&dai_tert_tdm_tx_2>, + <&dai_tert_tdm_tx_3>, <&dai_quat_tdm_rx_0>, + <&dai_quat_tdm_rx_1>, <&dai_quat_tdm_rx_2>, + <&dai_quat_tdm_rx_3>, <&dai_quat_tdm_tx_0>, + <&dai_quat_tdm_tx_1>, <&dai_quat_tdm_tx_2>, + <&dai_quat_tdm_tx_3>, <&dai_quin_tdm_rx_0>, + <&dai_quin_tdm_rx_1>, <&dai_quin_tdm_rx_2>, + <&dai_quin_tdm_rx_3>, <&dai_quin_tdm_tx_0>, + <&dai_quin_tdm_tx_1>, <&dai_quin_tdm_tx_2>, + <&dai_quin_tdm_tx_3>; + asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608", + "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", + "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", + "msm-dai-q6-mi2s.4", "msm-dai-q6-auxpcm.1", + "msm-dai-q6-auxpcm.2", "msm-dai-q6-auxpcm.3", + "msm-dai-q6-auxpcm.4", "msm-dai-q6-auxpcm.5", + "msm-dai-q6-dev.224", "msm-dai-q6-dev.225", + "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", + "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", + "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36880", 
"msm-dai-q6-tdm.36882", + "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886", + "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", + "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", + "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", + "msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902", + "msm-dai-q6-tdm.36904", "msm-dai-q6-tdm.36897", + "msm-dai-q6-tdm.36899", "msm-dai-q6-tdm.36901", + "msm-dai-q6-tdm.36903", "msm-dai-q6-tdm.36912", + "msm-dai-q6-tdm.36914", "msm-dai-q6-tdm.36916", + "msm-dai-q6-tdm.36918", "msm-dai-q6-tdm.36913", + "msm-dai-q6-tdm.36915", "msm-dai-q6-tdm.36917", + "msm-dai-q6-tdm.36919", "msm-dai-q6-tdm.36928", + "msm-dai-q6-tdm.36930", "msm-dai-q6-tdm.36932", + "msm-dai-q6-tdm.36934", "msm-dai-q6-tdm.36929", + "msm-dai-q6-tdm.36931", "msm-dai-q6-tdm.36933", + "msm-dai-q6-tdm.36935"; + asoc-codec = <&stub_codec>; + asoc-codec-names = "msm-stub-codec.1"; + }; +}; + +&qupv3_se4_i2c { + status = "disabled"; +}; + +&slim_aud { + status = "disabled"; + msm_dai_slim { + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi index 5351f31544d35292015fefc27e760a25ffc0ae29..2d98ee0d3ab57e39cc279a49f808b7499c39ae44 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-pmic.dtsi @@ -78,16 +78,16 @@ }; &mdss_dsi0 { - /delete-property/ vdda-1p2-supply; + vdda-1p2-supply = <&pm6155_1_l11>; }; &sde_dp { - /delete-property/ vdda-1p2-supply; - /delete-property/ vdda-0p9-supply; + vdda-1p2-supply = <&pm6155_1_l11>; + vdda-0p9-supply = <&pm6155_1_l5>; }; &mdss_dsi_phy0 { - /delete-property/ vdda-0p9-supply; + vdda-0p9-supply = <&pm6155_1_l5>; }; &cam_csiphy0 { @@ -109,7 +109,7 @@ }; &icnss { - /delete-property/ vdd-0.8-cx-mx-supply; + /delete-property/ vdd-cx-mx-supply; /delete-property/ vdd-1.8-xo-supply ; /delete-property/ vdd-1.3-rfa-supply; /delete-property/ vdd-3.3-ch0-supply; @@ -147,6 +147,7 @@ /delete-node/ rf_pa0_therm-therm; /delete-node/ camera_flash-therm; 
/delete-node/ quiet-therm; + /delete-node/ quiet-therm-step; /delete-node/ aoss-lowf; /delete-node/ cpuss-0-lowf; /delete-node/ cpuss-1-lowf; diff --git a/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi b/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi index 1e6fe05effea60c36829e235fdee961763039399..b89bb1e61f1fce5598b65d032238f35d31c0fa85 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-regulator.dtsi @@ -14,7 +14,7 @@ &soc { rpmh-regulator-smpa2 { - compatible = "qcom,rpmh-vrm-regulator"; + compatible = "qcom,rpmh-arc-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "cx.lvl"; diff --git a/arch/arm64/boot/dts/qcom/sa6155.dtsi b/arch/arm64/boot/dts/qcom/sa6155.dtsi index 67e3d5e5f9965e1c24bc5054f60847eee673c402..10d67f890e5c8f7307bfc9c9d4b00d5a9e494226 100644 --- a/arch/arm64/boot/dts/qcom/sa6155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155.dtsi @@ -12,6 +12,7 @@ #include "sm6150.dtsi" #include "sa6155-pmic.dtsi" +#include / { model = "Qualcomm Technologies, Inc. 
SA6155"; @@ -36,3 +37,198 @@ vdda18-supply = <&L12A>; vdda33-supply = <&L13A>; }; + +&tlmm { + ioexp_intr_active: ioexp_intr_active { + mux { + pins = "gpio58"; + function = "gpio"; + }; + config { + pins = "gpio58"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + ioexp_reset_active: ioexp_reset_active { + mux { + pins = "gpio3"; + function = "gpio"; + }; + config { + pins = "gpio3"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; +}; + +&sde_dp { + qcom,ext-disp = <&ext_disp>; + qcom,dp-hpd-gpio = <&ioexp 8 0>; + + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&dp_hpd_cfg_pins>; + pinctrl-1 = <&dp_hpd_cfg_pins>; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; +}; + +&qupv3_se2_i2c { + + status = "ok"; + + pinctrl-0 = <&qupv3_se2_i2c_active + &ioexp_intr_active + &ioexp_reset_active>; + + ioexp: gpio@3e { + #gpio-cells = <2>; + #interrupt-cells = <2>; + compatible = "semtech,sx1509q"; + reg = <0x3e>; + interrupt-parent = <&tlmm>; + interrupts = <58 0>; + gpio-controller; + interrupt-controller; + semtech,probe-reset; + + pinctrl-names = "default"; + pinctrl-0 = <&dsi1_hpd_cfg_pins + &dsi1_cdet_cfg_pins>; + + dsi1_hpd_cfg_pins: gpio0-cfg { + pins = "gpio0"; + bias-pull-up; + }; + + dsi1_cdet_cfg_pins: gpio1-cfg { + pins = "gpio1"; + bias-pull-down; + }; + + dp_hpd_cfg_pins: gpio8-cfg { + pins = "gpio8"; + bias-pull-down; + }; + }; + + i2c-mux@77 { + compatible = "nxp,pca9542"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + anx_7625_1: anx7625@2c { + compatible = "analogix,anx7625"; + reg = <0x2c>; + interrupt-parent = <&ioexp>; + interrupts = <0 0>; + cbl_det-gpio = <&ioexp 1 0>; + power_en-gpio = <&tlmm 4 0>; + reset_n-gpio = 
<&tlmm 5 0>; + }; + }; + }; +}; + +&anx_7625_1 { + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + anx_7625_1_in: endpoint { + remote-endpoint = <&dsi_anx_7625_1_out>; + }; + }; + }; +}; + +#include "dsi-panel-ext-bridge-1080p.dtsi" + +&soc { + dsi_anx_7625_1: qcom,dsi-display@17 { + label = "dsi_anx_7625_1"; + qcom,dsi-display-active; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_ext_bridge_1080p>; + }; + + dsi_dp1: qcom,dsi-display@1 { + compatible = "qcom,dsi-display"; + label = "primary"; + + qcom,dsi-ctrl = <&mdss_dsi0>; + qcom,dsi-phy = <&mdss_dsi_phy0>; + + clocks = <&mdss_dsi0_pll BYTE0_MUX_CLK>, + <&mdss_dsi0_pll PIX0_MUX_CLK>; + clock-names = "src_byte_clk0", "src_pixel_clk0"; + + + qcom,dsi-display-list = + <&dsi_anx_7625_1>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_anx_7625_1_out: endpoint { + remote-endpoint = <&anx_7625_1_in>; + }; + }; + }; + }; + + sde_wb: qcom,wb-display@0 { + compatible = "qcom,wb-display"; + cell-index = <0>; + label = "wb_display"; + }; + + ext_disp: qcom,msm-ext-disp { + compatible = "qcom,msm-ext-disp"; + + ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx { + compatible = "qcom,msm-ext-disp-audio-codec-rx"; + }; + }; +}; + +&mdss_dsi_phy0 { + qcom,panel-force-clock-lane-hs; +}; + +&mdss_dsi0_pll { + /delete-property/ qcom,dsi-pll-ssc-en; +}; + +&mdss_mdp { + connectors = <&sde_rscc &dsi_dp1 &sde_wb>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts index ba038632f1e8dc62efa89ba139748a822a23aee1..c892af7c15c32a767425a15ab99b660c3b1d835a 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts @@ -14,6 +14,7 @@ /plugin/; #include +#include #include 
"sa6155-adp-star.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa6155p.dtsi b/arch/arm64/boot/dts/qcom/sa6155p.dtsi index 369144d9c23c17c900450d45f67c25639ee6bfc1..c7b6136015c696670f009f584ae097d34421254f 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155p.dtsi @@ -12,7 +12,9 @@ #include "sm6150.dtsi" #include "sa6155-pmic.dtsi" +#include +#include "sm6150-camera-sensor-adp.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P"; qcom,msm-name = "SA6155P"; @@ -57,3 +59,201 @@ vdda18-supply = <&L12A>; vdda33-supply = <&L13A>; }; + +&tlmm { + ioexp_intr_active: ioexp_intr_active { + mux { + pins = "gpio58"; + function = "gpio"; + }; + config { + pins = "gpio58"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + ioexp_reset_active: ioexp_reset_active { + mux { + pins = "gpio3"; + function = "gpio"; + }; + config { + pins = "gpio3"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; +}; + +&sde_dp { + qcom,ext-disp = <&ext_disp>; + qcom,dp-hpd-gpio = <&ioexp 8 0>; + + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&dp_hpd_cfg_pins>; + pinctrl-1 = <&dp_hpd_cfg_pins>; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; +}; + +&qupv3_se2_i2c { + + status = "ok"; + + pinctrl-0 = <&qupv3_se2_i2c_active + &ioexp_intr_active + &ioexp_reset_active>; + + ioexp: gpio@3e { + #gpio-cells = <2>; + #interrupt-cells = <2>; + compatible = "semtech,sx1509q"; + reg = <0x3e>; + interrupt-parent = <&tlmm>; + interrupts = <58 0>; + gpio-controller; + interrupt-controller; + semtech,probe-reset; + + pinctrl-names = "default"; + pinctrl-0 = <&dsi1_hpd_cfg_pins + &dsi1_cdet_cfg_pins>; + + dsi1_hpd_cfg_pins: gpio0-cfg { + pins = "gpio0"; + bias-pull-up; + }; + + dsi1_cdet_cfg_pins: gpio1-cfg { + pins = "gpio1"; + 
bias-pull-down; + }; + + dp_hpd_cfg_pins: gpio8-cfg { + pins = "gpio8"; + bias-pull-down; + }; + }; + + i2c-mux@77 { + compatible = "nxp,pca9542"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + anx_7625_1: anx7625@2c { + compatible = "analogix,anx7625"; + reg = <0x2c>; + interrupt-parent = <&ioexp>; + interrupts = <0 0>; + cbl_det-gpio = <&ioexp 1 0>; + power_en-gpio = <&tlmm 4 0>; + reset_n-gpio = <&tlmm 5 0>; + }; + }; + }; +}; + +&anx_7625_1 { + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + anx_7625_1_in: endpoint { + remote-endpoint = <&dsi_anx_7625_1_out>; + }; + }; + }; +}; + +#include "dsi-panel-ext-bridge-1080p.dtsi" + +&soc { + dsi_anx_7625_1: qcom,dsi-display@17 { + label = "dsi_anx_7625_1"; + qcom,dsi-display-active; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_ext_bridge_1080p>; + }; + + dsi_dp1: qcom,dsi-display@1 { + compatible = "qcom,dsi-display"; + label = "primary"; + + qcom,dsi-ctrl = <&mdss_dsi0>; + qcom,dsi-phy = <&mdss_dsi_phy0>; + + clocks = <&mdss_dsi0_pll BYTE0_MUX_CLK>, + <&mdss_dsi0_pll PIX0_MUX_CLK>; + clock-names = "src_byte_clk0", "src_pixel_clk0"; + + + qcom,dsi-display-list = + <&dsi_anx_7625_1>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_anx_7625_1_out: endpoint { + remote-endpoint = <&anx_7625_1_in>; + }; + }; + }; + }; + + sde_wb: qcom,wb-display@0 { + compatible = "qcom,wb-display"; + cell-index = <0>; + label = "wb_display"; + }; + + ext_disp: qcom,msm-ext-disp { + compatible = "qcom,msm-ext-disp"; + + ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx { + compatible = "qcom,msm-ext-disp-audio-codec-rx"; + }; + }; +}; + +&mdss_dsi_phy0 { + qcom,panel-force-clock-lane-hs; +}; + +&mdss_dsi0_pll { + /delete-property/ 
qcom,dsi-pll-ssc-en; +}; + +&mdss_mdp { + connectors = <&sde_rscc &dsi_dp1 &sde_wb>; +}; + +/* Audio device tree */ +#include "sa6155-audio.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8155-audio.dtsi b/arch/arm64/boot/dts/qcom/sa8155-audio.dtsi index 033c83a825c64a2801da98846cdb57fb8238dac2..a208adb1b827afc53559787378a39d9eaf0749de 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-audio.dtsi @@ -91,15 +91,19 @@ qcom,msm-dai-tdm-sec-rx { compatible = "qcom,msm-dai-tdm"; qcom,msm-cpudai-tdm-group-id = <37136>; - qcom,msm-cpudai-tdm-group-num-ports = <4>; - qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>; + qcom,msm-cpudai-tdm-group-num-ports = <5>; + qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 + 36886 36894>; qcom,msm-cpudai-tdm-clk-rate = <12288000>; - qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-clk-internal = <0>; qcom,msm-cpudai-tdm-sync-mode = <1>; - qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-sync-src = <0>; qcom,msm-cpudai-tdm-data-out = <0>; qcom,msm-cpudai-tdm-invert-sync = <0>; qcom,msm-cpudai-tdm-data-delay = <0>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&sec_tdm_active &sec_tdm_dout_active>; + pinctrl-1 = <&sec_tdm_sleep &sec_tdm_dout_sleep>; dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 { compatible = "qcom,msm-dai-q6-tdm"; qcom,msm-cpudai-tdm-dev-id = <36880>; @@ -123,6 +127,12 @@ qcom,msm-cpudai-tdm-dev-id = <36886>; qcom,msm-cpudai-tdm-data-align = <0>; }; + + dai_sec_tdm_rx_7: qcom,msm-dai-q6-tdm-sec-rx-7 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36894>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; }; qcom,msm-dai-tdm-sec-tx { @@ -131,12 +141,15 @@ qcom,msm-cpudai-tdm-group-num-ports = <4>; qcom,msm-cpudai-tdm-group-port-id = <36881 36883 36885 36887>; qcom,msm-cpudai-tdm-clk-rate = <12288000>; - qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-clk-internal = <0>; qcom,msm-cpudai-tdm-sync-mode = 
<1>; - qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-sync-src = <0>; qcom,msm-cpudai-tdm-data-out = <0>; qcom,msm-cpudai-tdm-invert-sync = <0>; qcom,msm-cpudai-tdm-data-delay = <0>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&sec_tdm_din_active>; + pinctrl-1 = <&sec_tdm_din_sleep>; dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 { compatible = "qcom,msm-dai-q6-tdm"; qcom,msm-cpudai-tdm-dev-id = <36881>; @@ -175,6 +188,9 @@ qcom,msm-cpudai-tdm-data-out = <0>; qcom,msm-cpudai-tdm-invert-sync = <0>; qcom,msm-cpudai-tdm-data-delay = <0>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&tert_tdm_active &tert_tdm_dout_active>; + pinctrl-1 = <&tert_tdm_sleep &tert_tdm_dout_sleep>; dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 { compatible = "qcom,msm-dai-q6-tdm"; qcom,msm-cpudai-tdm-dev-id = <36896>; @@ -209,8 +225,9 @@ qcom,msm-dai-tdm-tert-tx { compatible = "qcom,msm-dai-tdm"; qcom,msm-cpudai-tdm-group-id = <37153>; - qcom,msm-cpudai-tdm-group-num-ports = <4>; - qcom,msm-cpudai-tdm-group-port-id = <36897 36899 36901 36903>; + qcom,msm-cpudai-tdm-group-num-ports = <5>; + qcom,msm-cpudai-tdm-group-port-id = <36897 36899 36901 + 36903 36911>; qcom,msm-cpudai-tdm-clk-rate = <12288000>; qcom,msm-cpudai-tdm-clk-internal = <1>; qcom,msm-cpudai-tdm-sync-mode = <1>; @@ -218,6 +235,9 @@ qcom,msm-cpudai-tdm-data-out = <0>; qcom,msm-cpudai-tdm-invert-sync = <0>; qcom,msm-cpudai-tdm-data-delay = <0>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&tert_tdm_din_active>; + pinctrl-1 = <&tert_tdm_din_sleep>; dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 { compatible = "qcom,msm-dai-q6-tdm"; qcom,msm-cpudai-tdm-dev-id = <36897>; @@ -241,6 +261,12 @@ qcom,msm-cpudai-tdm-dev-id = <36903>; qcom,msm-cpudai-tdm-data-align = <0>; }; + + dai_tert_tdm_tx_7: qcom,msm-dai-q6-tdm-tert-tx-7 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36911>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; }; qcom,msm-dai-tdm-quat-rx { @@ -437,13 +463,14 
@@ <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>, <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>, - <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, - <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, - <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, - <&dai_tert_tdm_rx_2>, <&dai_tert_tdm_rx_3>, - <&dai_tert_tdm_rx_4>, <&dai_tert_tdm_tx_0>, - <&dai_tert_tdm_tx_1>, <&dai_tert_tdm_tx_2>, - <&dai_tert_tdm_tx_3>, <&dai_quat_tdm_rx_0>, + <&dai_sec_tdm_rx_7>, <&dai_sec_tdm_tx_0>, + <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, + <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, + <&dai_tert_tdm_rx_1>, <&dai_tert_tdm_rx_2>, + <&dai_tert_tdm_rx_3>, <&dai_tert_tdm_rx_4>, + <&dai_tert_tdm_tx_0>, <&dai_tert_tdm_tx_1>, + <&dai_tert_tdm_tx_2>, <&dai_tert_tdm_tx_3>, + <&dai_tert_tdm_tx_7>, <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_rx_1>, <&dai_quat_tdm_rx_2>, <&dai_quat_tdm_rx_3>, <&dai_quat_tdm_tx_0>, <&dai_quat_tdm_tx_1>, <&dai_quat_tdm_tx_2>, @@ -469,13 +496,14 @@ "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882", "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886", - "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", - "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", - "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", - "msm-dai-q6-tdm.36900", "msm-dai-q6-tdm.36902", - "msm-dai-q6-tdm.36904", "msm-dai-q6-tdm.36897", - "msm-dai-q6-tdm.36899", "msm-dai-q6-tdm.36901", - "msm-dai-q6-tdm.36903", "msm-dai-q6-tdm.36912", + "msm-dai-q6-tdm.36894", "msm-dai-q6-tdm.36881", + "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", + "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", + "msm-dai-q6-tdm.36898", "msm-dai-q6-tdm.36900", + "msm-dai-q6-tdm.36902", "msm-dai-q6-tdm.36904", + "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36899", + "msm-dai-q6-tdm.36901", "msm-dai-q6-tdm.36903", + "msm-dai-q6-tdm.36911", "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36914", "msm-dai-q6-tdm.36916", "msm-dai-q6-tdm.36918", "msm-dai-q6-tdm.36913", "msm-dai-q6-tdm.36915", 
"msm-dai-q6-tdm.36917", @@ -499,3 +527,13 @@ status = "disabled"; }; }; + +&qupv3_se22_spi { + status = "ok"; + spi_codec@0 { + compatible = "qcom,spi-msm-codec-slave"; + reg = <0>; + spi-max-frequency = <10000000>; + spi-cpha; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi b/arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi index b255a856f7a92c25c8c7caa9d42e9f7b7a24b31d..16873c1c64c006c2e80963f5268b85bf66e9fa4b 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi @@ -710,4 +710,26 @@ compatible = "qcom,stub-regulator"; regulator-name = "vreg_wlan"; }; + + /* PWR_CTR1_VDD_PA supply */ + vreg_conn_pa: vreg_conn_pa { + compatible = "regulator-fixed"; + regulator-name = "vreg_conn_pa"; + pinctrl-names = "default"; + pinctrl-0 = <&conn_power_pa_active>; + startup-delay-us = <4000>; + enable-active-high; + gpio = <&tlmm 173 0>; + }; + + /* PWR_CTR2_VDD_1P8 supply */ + vreg_conn_1p8: vreg_conn_1p8 { + compatible = "regulator-fixed"; + regulator-name = "vreg_conn_1p8"; + pinctrl-names = "default"; + pinctrl-0 = <&conn_power_1p8_active>; + startup-delay-us = <4000>; + enable-active-high; + gpio = <&tlmm 174 0>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155-v2.dtsi b/arch/arm64/boot/dts/qcom/sa8155-v2.dtsi index 5f13249b2f5c749170e3fb2ab7990dfa8b47890d..df8ace3d0fc1bebe28d8a414b6272036614d020c 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-v2.dtsi @@ -19,6 +19,10 @@ qcom,msm-id = <362 0x20000>; }; +&emac_hw { + emac-core-version = <4>; +}; + &ufsphy_mem { vdda-phy-supply = <&pm8150_1_l5>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155.dtsi b/arch/arm64/boot/dts/qcom/sa8155.dtsi index 9208410e9ada241bd327b9796c691b2ee0504670..c87d88e815dc31eb6ab62deaf6aee4e37b6eacfc 100644 --- a/arch/arm64/boot/dts/qcom/sa8155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155.dtsi @@ -62,7 +62,12 @@ compatible = "qca,qca6174"; pinctrl-names = "default"; pinctrl-0 = 
<&bt_en_active>; - qca,bt-reset-gpio = <&tlmm 172 0>; /* BT_EN */ + /* BT_EN */ + qca,bt-reset-gpio = <&tlmm 172 0>; + /* PWR_CTR1_VDD_PA */ + qca,bt-vdd-pa-supply = <&vreg_conn_pa>; + /* PWR_CTR2_VDD_1P8 */ + qca,bt-chip-pwd-supply = <&vreg_conn_1p8>; status = "disabled"; }; }; @@ -93,6 +98,9 @@ &pcie0 { vreg-1.8-supply = <&pm8150_2_l8>; vreg-0.9-supply = <&pm8150_2_l18>; + qcom,no-l1-supported; + qcom,no-l1ss-supported; + qcom,no-aux-clk-sync; }; &pcie1 { @@ -611,6 +619,14 @@ qcom,panel-force-clock-lane-hs; }; +&mdss_dsi0_pll { + /delete-property/ qcom,dsi-pll-ssc-en; +}; + +&mdss_dsi1_pll { + /delete-property/ qcom,dsi-pll-ssc-en; +}; + &mdss_mdp { connectors = <&sde_rscc &dsi_dp1 &dsi_dp2 &sde_dp &sde_wb>; qcom,sde-mixer-display-pref = "primary", "none", "none", @@ -645,10 +661,10 @@ qcom,msm-bus,num-paths = <2>; qcom,msm-bus,vectors-KBps = <98 512 0 0>, <1 781 0 0>, /* No vote */ - <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ - <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ - <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ - qcom,bus-vector-names = "10", "100", "1000"; + <98 512 2500 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 25000 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 250000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + qcom,bus-vector-names = "0", "10", "100", "1000"; clocks = <&clock_gcc GCC_EMAC_AXI_CLK>, <&clock_gcc GCC_EMAC_PTP_CLK>, <&clock_gcc GCC_EMAC_RGMII_CLK>, @@ -671,7 +687,9 @@ "dev-emac-rgmii_rxd2_state", "dev-emac-rgmii_rxd3_state", "dev-emac-rgmii_rxc_state", - "dev-emac-rgmii_rx_ctl_state"; + "dev-emac-rgmii_rx_ctl_state", + "dev-emac-phy_intr", + "dev-emac-phy_reset_state"; pinctrl-0 = <&emac_mdc>; pinctrl-1 = <&emac_mdio>; @@ -689,6 +707,8 @@ pinctrl-11 = <&emac_rgmii_rxd3>; pinctrl-12 = <&emac_rgmii_rxc>; pinctrl-13 = <&emac_rgmii_rx_ctl>; + pinctrl-14 = <&emac_phy_intr>; + pinctrl-15 = <&emac_phy_reset_state>; io-macro-info { io-macro-bypass-mode = <0>; @@ -696,7 +716,6 @@ }; emac_emb_smmu: 
emac_emb_smmu { compatible = "qcom,emac-smmu-embedded"; - qcom,smmu-s1-bypass; iommus = <&apps_smmu 0x3C0 0x0>; qcom,iova-mapping = <0x80000000 0x40000000>; }; @@ -706,6 +725,8 @@ compatible = "qcom,cnss"; wlan-en-gpio = <&tlmm 169 0>; vdd-wlan-supply = <&vreg_wlan>; + vdd-wlan-ctrl1-supply = <&vreg_conn_pa>; + vdd-wlan-ctrl2-supply = <&vreg_conn_1p8>; reg = <0x10000000 0x10000000>, <0x20000000 0x10000>; reg-names = "smmu_iova_base", "smmu_iova_ipa"; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..a180cdd6fc2b9396018f4e17f54798cb0369fc18 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-audio-overlay.dtsi @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm6150-audio-overlay.dtsi" + +&swr0 { + interrupts = <0 295 0>; +}; + +&swr1 { + interrupts = <0 297 0>; +}; + +&swr2 { + interrupts = <0 296 0>, <0 528 0>; +}; + +&sm6150_snd { + qcom,model = "sm6150-wcd9375-snd-card"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-audio.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-audio.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ef1c61a7c73dfa62b35dd580a97e79d79425288b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-audio.dtsi @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm6150-audio.dtsi" + +&msm_audio_ion { + iommus = <&apps_smmu 0x1b21 0x0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..212c9bcdaaf60bdabcdf1fce0b23daa4cf0167e6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp-overlay.dts @@ -0,0 +1,26 @@ + +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-idp.dtsi" + +/ { + model = "External Audio Codec IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,msm-id = <365 0x0>; + qcom,board-id = <34 1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..8f3fc1b03dab533de376244057a79c17fea8e83c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-external-codec-idp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" +#include "sdmmagpie-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE PM6150 External Audio Codec IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,board-id = <34 1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts index 74a472a6a4283c45c503562559db4c148da3b2e3..c2a273648b809588c15cb48279497c18a705a9f7 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts @@ -16,10 +16,15 @@ #include #include "sdmmagpie-idp.dtsi" +#include "sdmmagpie-audio-overlay.dtsi" / { - model = "Qualcomm Technologies, Inc. 
SDMMAGPIE IDP"; + model = "IDP"; compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; qcom,msm-id = <365 0x0>; qcom,board-id = <34 0>; }; + +&dsi_sw43404_amoled_video_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts index 273174c8c44140f88fa6269de9c0a7008131e06f..03c4bc1d9c89d71be3f8677c3f95c56abf3e7d1b 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts @@ -16,7 +16,7 @@ #include "sdmmagpie-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SDMMAGPIE IDP"; + model = "Qualcomm Technologies, Inc. SDMMAGPIE PM6150 IDP"; compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; qcom,board-id = <34 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi index 3367bd3c8dad25d3c5c3e11ca466781b1a1f96aa..7f465facd2166591b746fb1d4dc73ff0cdfb60a0 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi @@ -9,8 +9,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ +#include "sdmmagpie-thermal-overlay.dtsi" #include +#include "sdmmagpie-sde-display.dtsi" &soc { }; @@ -76,7 +78,70 @@ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; - cd-gpios = <&tlmm 69 GPIO_ACTIVE_HIGH>; + cd-gpios = <&tlmm 69 GPIO_ACTIVE_LOW>; status = "ok"; }; + +&dsi_sw43404_amoled_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_sw43404_amoled_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_sw43404_amoled_fhd_plus_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_sim_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_sim_vid { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_dual_sim_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_dual_sim_vid { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + 
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_sim_dsc_375_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; + +&dsi_dual_sim_dsc_375_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-npu.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-npu.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..5737b78e04a7bd73c1c71ca9fb3bbefddcc75b59 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-npu.dtsi @@ -0,0 +1,182 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + msm_npu: qcom,msm_npu@9800000 { + compatible = "qcom,msm-npu"; + reg = <0x9800000 0x800000>; + reg-names = "npu_base"; + interrupts = , + , + ; + interrupt-names = "error_irq", "wdg_bite_irq", "ipc_irq"; + iommus = <&apps_smmu 0x1461 0x0>; + cache-slice-names = "npu"; + cache-slices = <&llcc 23>; + + clocks = <&clock_npucc NPU_CC_CAL_DP_CLK>, + <&clock_npucc NPU_CC_XO_CLK>, + <&clock_npucc NPU_CC_ARMWIC_CORE_CLK>, + <&clock_npucc NPU_CC_BTO_CORE_CLK>, + <&clock_npucc NPU_CC_BWMON_CLK>, + <&clock_npucc NPU_CC_CAL_DP_CDC_CLK>, + <&clock_npucc NPU_CC_COMP_NOC_AXI_CLK>, + <&clock_npucc NPU_CC_CONF_NOC_AHB_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_APB_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_ATB_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_CTI_CLK>, + <&clock_npucc NPU_CC_NPU_CPC_CLK>, + <&clock_npucc NPU_CC_NPU_CPC_TIMER_CLK>, + <&clock_npucc NPU_CC_PERF_CNT_CLK>, + <&clock_npucc NPU_CC_QTIMER_CORE_CLK>, + <&clock_npucc NPU_CC_SLEEP_CLK>, + <&clock_aop QDSS_CLK>; + clock-names = "cal_dp_clk", + "xo_clk", + "armwic_core_clk", + "bto_core_clk", + "bwmon_clk", + "cal_dp_cdc_clk", + "comp_noc_axi_clk", + "conf_noc_ahb_clk", + "npu_core_apb_clk", + "npu_core_atb_clk", + "npu_core_clk", + "npu_core_cti_clk", + "npu_cpc_clk", + "npu_cpc_timer_clk", + "perf_cnt_clk", + "qtimer_core_clk", + "sleep_clk", + "qdss_clk"; + vdd-supply = <&npu_core_gdsc>; + vdd_cx-supply = <&VDD_CX_LEVEL>; + qcom,proxy-reg-names ="vdd", "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + mboxes = <&qmp_npu0 0>, <&qmp_npu1 0>; + mbox-names = "npu_low", "npu_high"; + + #cooling-cells = <2>; + qcom,npu-pwrlevels { + #address-cells = <1>; + #size-cells = <0>; + compatible = "qcom,npu-pwrlevels"; + initial-pwrlevel = <4>; + qcom,npu-pwrlevel@0 { + reg = <0>; + clk-freq = <300000000 + 19200000 + 100000000 + 19200000 + 19200000 + 300000000 + 150000000 + 30000000 + 19200000 + 60000000 + 100000000 + 37500000 + 100000000 + 19200000 + 300000000 + 19200000 + 0 + 0>; + }; + 
qcom,npu-pwrlevel@1 { + reg = <1>; + clk-freq = <400000000 + 19200000 + 150000000 + 19200000 + 19200000 + 400000000 + 200000000 + 37500000 + 19200000 + 120000000 + 150000000 + 75000000 + 150000000 + 19200000 + 400000000 + 19200000 + 0 + 0>; + }; + qcom,npu-pwrlevel@2 { + reg = <2>; + clk-freq = <466500000 + 19200000 + 200000000 + 19200000 + 19200000 + 466500000 + 300000000 + 37500000 + 19200000 + 120000000 + 200000000 + 75000000 + 200000000 + 19200000 + 466500000 + 19200000 + 0 + 0>; + }; + qcom,npu-pwrlevel@3 { + reg = <3>; + clk-freq = <600000000 + 19200000 + 300000000 + 19200000 + 19200000 + 600000000 + 403000000 + 75000000 + 19200000 + 240000000 + 300000000 + 150000000 + 300000000 + 19200000 + 600000000 + 19200000 + 0 + 0>; + }; + qcom,npu-pwrlevel@4 { + reg = <4>; + clk-freq = <700000000 + 19200000 + 400000000 + 19200000 + 19200000 + 700000000 + 533000000 + 75000000 + 19200000 + 300000000 + 400000000 + 150000000 + 400000000 + 19200000 + 700000000 + 19200000 + 0 + 0>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi index 9aaf3f5a673041f70f962cbff5e922f19ebf75a0..11d01c8c0e3a18e6c5cda39349744a689f49536e 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi @@ -866,6 +866,184 @@ }; }; }; + + wsa_swr_clk_pin { + wsa_swr_clk_sleep: wsa_swr_clk_sleep { + mux { + pins = "gpio49"; + function = "WSA_CLK"; + }; + + config { + pins = "gpio49"; + drive-strength = <2>; + bias-bus-hold; + }; + }; + + wsa_swr_clk_active: wsa_swr_clk_active { + mux { + pins = "gpio49"; + function = "WSA_CLK"; + }; + + config { + pins = "gpio49"; + drive-strength = <2>; + bias-bus-hold; + }; + }; + }; + + wsa_swr_data_pin { + wsa_swr_data_sleep: wsa_swr_data_sleep { + mux { + pins = "gpio50"; + function = "WSA_DATA"; + }; + + config { + pins = "gpio50"; + drive-strength = <4>; + bias-bus-hold; + }; + }; + + wsa_swr_data_active: wsa_swr_data_active { + 
mux { + pins = "gpio50"; + function = "WSA_DATA"; + }; + + config { + pins = "gpio50"; + drive-strength = <4>; + bias-bus-hold; + }; + }; + }; + + /* WSA speaker reset pins */ + spkr_1_sd_n { + spkr_1_sd_n_sleep: spkr_1_sd_n_sleep { + mux { + pins = "gpio51"; + function = "gpio"; + }; + + config { + pins = "gpio51"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + + spkr_1_sd_n_active: spkr_1_sd_n_active { + mux { + pins = "gpio51"; + function = "gpio"; + }; + + config { + pins = "gpio51"; + drive-strength = <16>; /* 16 mA */ + bias-disable; + output-high; + }; + }; + }; + + spkr_2_sd_n { + spkr_2_sd_n_sleep: spkr_2_sd_n_sleep { + mux { + pins = "gpio52"; + function = "gpio"; + }; + + config { + pins = "gpio52"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + + spkr_2_sd_n_active: spkr_2_sd_n_active { + mux { + pins = "gpio52"; + function = "gpio"; + }; + + config { + pins = "gpio52"; + drive-strength = <16>; /* 16 mA */ + bias-disable; + output-high; + }; + }; + }; + + wcd9xxx_intr { + wcd_intr_default: wcd_intr_default{ + mux { + pins = "gpio58"; + function = "gpio"; + }; + + config { + pins = "gpio58"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + input-enable; + }; + }; + }; + + fsa_usbc_ana_en_n@42 { + fsa_usbc_ana_en: fsa_usbc_ana_en { + mux { + pins = "gpio42"; + function = "gpio"; + }; + + config { + pins = "gpio42"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + }; + + pmx_sde_te { + sde_te_active: sde_te_active { + mux { + pins = "gpio10"; + function = "mdp_vsync"; + }; + + config { + pins = "gpio10"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + + sde_te_suspend: sde_te_suspend { + mux { + pins = "gpio10"; + function = "mdp_vsync"; + }; + + config { + pins = "gpio10"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + }; + /* SDC pin type */ sdc1_clk_on: sdc1_clk_on { config { diff 
--git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts index b497ca644406fee3721140f9812319508602c001..5c1d9f3945e2f99dc886240d5130222882bdd839 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts @@ -18,7 +18,7 @@ #include "sdmmagpie-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. SDMMAGPIE QRD"; + model = "QRD"; compatible = "qcom,sdmmagpie-qrd", "qcom,sdmmagpie", "qcom,qrd"; qcom,msm-id = <365 0x0>; qcom,board-id = <11 0>; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts index f7e9368e05867a27c4cb0ed08ec0ffcd375292e1..6738bb875b84a23731f1b51f5e54a63c82a0a8de 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts @@ -16,7 +16,7 @@ #include "sdmmagpie-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. SDMMAGPIE QRD"; + model = "Qualcomm Technologies, Inc. SDMMAGPIE PM6150 QRD"; compatible = "qcom,sdmmagpie-qrd", "qcom,sdmmagpie", "qcom,qrd"; qcom,board-id = <11 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi index 3367bd3c8dad25d3c5c3e11ca466781b1a1f96aa..a858466d4031c2ee3fd040f646754e1d02c2f690 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi @@ -11,6 +11,7 @@ */ #include +#include "sdmmagpie-thermal-overlay.dtsi" &soc { }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi index ce55cc63ee7720385c5acf40eaaaf19e4445887d..688682d0673843cc1a04d989329770df682158cc 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi @@ -12,7 +12,6 @@ #include -/* TODO: Update volatge range once PGA is locked */ &soc { /* RPMh regulators */ /* PM6150 S2 = VDD_GFX supply */ @@ -62,6 +61,14 @@ 
qcom,init-voltage-level = ; }; + + mx_cdev: mx-cdev-lvl { + compatible = "qcom,regulator-cooling-device"; + regulator-cdev-supply = <&VDD_MX_LEVEL>; + regulator-levels = ; + #cooling-cells = <2>; + }; }; rpmh-regulator-smpc1 { @@ -71,9 +78,9 @@ S1C: pm6150l_s1: regulator-pm6150l-s1 { regulator-name = "pm6150l_s1"; qcom,set = ; - regulator-min-microvolt = <1128000>; - regulator-max-microvolt = <1128000>; - qcom,init-voltage = <1128000>; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1200000>; + qcom,init-voltage = <1000000>; }; }; @@ -110,6 +117,13 @@ ; qcom,min-dropout-voltage-level = <(-1)>; }; + + cx_cdev: regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "cx"; + #cooling-cells = <2>; + }; }; /* pm6150l S7 = VDD_MSS supply */ @@ -138,9 +152,34 @@ S8C: pm6150l_s8: regulator-pm6150l-s8 { regulator-name = "pm6150l_s8"; qcom,set = ; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1400000>; - qcom,init-voltage = <1200000>; + regulator-min-microvolt = <1120000>; + regulator-max-microvolt = <1408000>; + qcom,init-voltage = <1120000>; + }; + }; + + rpmh-regulator-smpf1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpf1"; + S1F: pm8009_s1: regulator-pm8009-s1 { + regulator-name = "pm8009_s1"; + qcom,set = ; + regulator-min-microvolt = <1064000>; + regulator-max-microvolt = <1360000>; + qcom,init-voltage = <1064000>; + }; + }; + + rpmh-regulator-smpf2 { + compatible = "qcom,rpmh-xob-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpf2"; + S2F: pm8009_s2: regulator-pm8009-s2 { + regulator-name = "pm8009_s2"; + qcom,set = ; + regulator-min-microvolt = <2900000>; + regulator-max-microvolt = <2900000>; }; }; @@ -156,9 +195,10 @@ L1A: pm6150_l1: regulator-pm6150-l1 { regulator-name = "pm6150_l1"; qcom,set = ; - regulator-min-microvolt = <1174000>; - regulator-max-microvolt = <1252000>; - qcom,init-voltage = <1174000>; + 
regulator-min-microvolt = <1096000>; + regulator-max-microvolt = <1304000>; + qcom,init-voltage = <1096000>; + qcom,init-mode = ; }; }; @@ -175,7 +215,7 @@ regulator-name = "pm6150_l2"; qcom,set = ; regulator-min-microvolt = <944000>; - regulator-max-microvolt = <1050000>; + regulator-max-microvolt = <1056000>; qcom,init-voltage = <944000>; qcom,init-mode = ; }; @@ -194,7 +234,7 @@ regulator-name = "pm6150_l3"; qcom,set = ; regulator-min-microvolt = <968000>; - regulator-max-microvolt = <1060000>; + regulator-max-microvolt = <1064000>; qcom,init-voltage = <968000>; qcom,init-mode = ; }; @@ -467,21 +507,14 @@ }; rpmh-regulator-ldoa18 { - compatible = "qcom,rpmh-vrm-regulator"; + compatible = "qcom,rpmh-xob-regulator"; mboxes = <&apps_rsc 0>; qcom,resource-name = "ldoa18"; - qcom,regulator-type = "pmic5-ldo"; - qcom,supported-modes = - ; - qcom,mode-threshold-currents = <0 1>; L18A: pm6150_l18: regulator-pm6150-l18 { regulator-name = "pm6150_l18"; qcom,set = ; regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3008000>; - qcom,init-voltage = <3000000>; - qcom,init-mode = ; + regulator-max-microvolt = <3000000>; }; }; @@ -743,4 +776,113 @@ qcom,init-mode = ; }; }; + + rpmh-regulator-ldof1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof1"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L1F: pm8009_l1: regulator-pm8009-l1 { + regulator-name = "pm8009_l1"; + qcom,set = ; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <1304000>; + qcom,init-voltage = <1100000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldof2 { + compatible = "qcom,rpmh-xob-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof2"; + L2F: pm8009_l2: regulator-pm8009-l2 { + regulator-name = "pm8009_l2"; + qcom,set = ; + regulator-min-microvolt = <1040000>; + regulator-max-microvolt = <1040000>; + }; + }; + + rpmh-regulator-ldof4 { + compatible 
= "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof4"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L4F: pm8009_l4: regulator-pm8009-l4 { + regulator-name = "pm8009_l4"; + qcom,set = ; + regulator-min-microvolt = <1096000>; + regulator-max-microvolt = <1304000>; + qcom,init-voltage = <1096000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldof5 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof5"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L5F: pm8009_l5: regulator-pm8009-l5 { + regulator-name = "pm8009_l5"; + qcom,set = ; + regulator-min-microvolt = <2696000>; + regulator-max-microvolt = <2904000>; + qcom,init-voltage = <2696000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldof6 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof6"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L6F: pm8009_l6: regulator-pm8009-l6 { + regulator-name = "pm8009_l6"; + qcom,set = ; + regulator-min-microvolt = <2696000>; + regulator-max-microvolt = <2904000>; + qcom,init-voltage = <2696000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldof7 { + compatible = "qcom,rpmh-xob-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldof7"; + L7F: pm8009_l7: regulator-pm8009-l7 { + regulator-name = "pm8009_l7"; + qcom,set = ; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1696000>; + }; + }; + + refgen: refgen-regulator@ff1000 { + compatible = "qcom,refgen-regulator"; + reg = <0xff1000 0x60>; + regulator-name = "refgen"; + regulator-enable-ramp-delay = <5>; + proxy-supply = <&refgen>; + qcom,proxy-consumer-enable; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts 
b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts index d83c3519baacea67c9add1e4ca7b75d7b2343824..5d1ccea284e1b06a88f68f2d90a058d93a6835fd 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts @@ -18,7 +18,7 @@ #include "sdmmagpie-rumi.dtsi" / { - model = "Qualcomm Technologies, Inc. SDMMAGPIE RUMI"; + model = "RUMI"; compatible = "qcom,sdmmagpie-rumi", "qcom,sdmmagpie", "qcom,rumi"; qcom,msm-id = <365 0x0>; qcom,board-id = <15 0>; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts index ba9b05c9a9bfcb7d85cf7ad33cf48ee12ca93511..0534580ad2c663c741425b4a96648a909b9c278d 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts @@ -16,7 +16,7 @@ #include "sdmmagpie-rumi.dtsi" / { - model = "Qualcomm Technologies, Inc. SDMMAGPIE RUMI"; + model = "Qualcomm Technologies, Inc. SDMMAGPIE PM6150 RUMI"; compatible = "qcom,sdmmagpie-rumi", "qcom,sdmmagpie", "qcom,rumi"; qcom,board-id = <15 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi index 1df64a4350c4323dcfe8f10846d3b195a6279ff8..9ea638381869f660694b04e2208f8b41eb92a9d7 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi @@ -11,6 +11,26 @@ */ &soc { + usb_emu_phy: usb_emu_phy@a720000 { + compatible = "qcom,usb-emu-phy"; + reg = <0x0a720000 0x9500>, + <0x0a6f8800 0x100>; + reg-names = "base", "qcratch_base"; + + qcom,emu-init-seq = <0xfff0 0x4 + 0xfff3 0x4 + 0x40 0x4 + 0xfff3 0x4 + 0xfff0 0x4 + 0x100000 0x20 + 0x0 0x20 + 0x1a0 0x20 + 0x100000 0x3c + 0x0 0x3c + 0x10060 0x3c + 0x0 0x4>; + }; + timer { clock-frequency = <1000000>; }; @@ -131,4 +151,42 @@ status = "ok"; }; +&thermal_zones { + /delete-node/ aoss-0-lowf; + /delete-node/ cpu-0-0-lowf; + /delete-node/ cpu-1-0-lowf; + /delete-node/ gpuss-0-lowf; + /delete-node/ cwlan-lowf; + 
/delete-node/ audio-lowf; + /delete-node/ ddr-lowf; + /delete-node/ q6-hvx-lowf; + /delete-node/ camera-lowf; + /delete-node/ mdm-core-lowf; + /delete-node/ mdm-dsp-lowf; + /delete-node/ npu-lowf; + /delete-node/ video-lowf; +}; + +&usb0 { + /delete-property/ iommus; + /delete-property/ qcom,smmu-s1-bypass; + /delete-property/ USB3_GDSC-supply; + /delete-property/ extcon; + dwc3@a600000 { + usb-phy = <&usb_emu_phy>, <&usb_nop_phy>; + maximum-speed = "high-speed"; + }; + qcom,usbbam@a704000 { + status = "disabled"; + }; +}; + +&qusb_phy0 { + status = "disabled"; +}; + +&usb_qmp_dp_phy { + status = "disabled"; +}; + #include "sdmmagpie-stub-regulator.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-sde-display.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..95c5842c7643ee9421bedff51fe3a10a292267a3 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-sde-display.dtsi @@ -0,0 +1,420 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dsi-panel-sim-video.dtsi" +#include "dsi-panel-sim-cmd.dtsi" +#include "dsi-panel-sim-dsc375-cmd.dtsi" +#include "dsi-panel-sim-dualmipi-video.dtsi" +#include "dsi-panel-sim-dualmipi-cmd.dtsi" +#include "dsi-panel-sim-dualmipi-dsc375-cmd.dtsi" +#include "dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi" +#include "dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi" +#include "dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi" +#include + +&soc { + dsi_panel_pwr_supply: dsi_panel_pwr_supply { + #address-cells = <1>; + #size-cells = <0>; + + qcom,panel-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <62000>; + qcom,supply-disable-load = <80>; + qcom,supply-post-on-sleep = <20>; + }; + + qcom,panel-supply-entry@1 { + reg = <1>; + qcom,supply-name = "lab"; + qcom,supply-min-voltage = <4600000>; + qcom,supply-max-voltage = <6000000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + }; + + qcom,panel-supply-entry@2 { + reg = <2>; + qcom,supply-name = "ibb"; + qcom,supply-min-voltage = <4600000>; + qcom,supply-max-voltage = <6000000>; + qcom,supply-enable-load = <100000>; + qcom,supply-disable-load = <100>; + qcom,supply-post-on-sleep = <20>; + }; + }; + + dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb { + #address-cells = <1>; + #size-cells = <0>; + + qcom,panel-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1800000>; + qcom,supply-max-voltage = <1800000>; + qcom,supply-enable-load = <62000>; + qcom,supply-disable-load = <80>; + qcom,supply-post-on-sleep = <20>; + }; + }; + + dsi_panel_pwr_supply_labibb_amoled: dsi_panel_pwr_supply_labibb_amoled { + #address-cells = <1>; + #size-cells = <0>; + + qcom,panel-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vddio"; + qcom,supply-min-voltage = <1904000>; + qcom,supply-max-voltage = <1904000>; + qcom,supply-enable-load = 
<32000>; + qcom,supply-disable-load = <80>; + }; + + qcom,panel-supply-entry@1 { + reg = <1>; + qcom,supply-name = "vdda-3p3"; + qcom,supply-min-voltage = <3008000>; + qcom,supply-max-voltage = <3008000>; + qcom,supply-enable-load = <13200>; + qcom,supply-disable-load = <80>; + }; + }; + + dsi_sw43404_amoled_video_display: qcom,dsi-display@0 { + label = "dsi_sw43404_amoled_video_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sw43404_amoled_video>; + }; + + dsi_sw43404_amoled_cmd_display: qcom,dsi-display@1 { + label = "dsi_sw43404_amoled_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sw43404_amoled_cmd>; + }; + + dsi_sw43404_amoled_fhd_plus_cmd_display: qcom,dsi-display@2 { + label = "dsi_sw43404_amoled_fhd_plus_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sw43404_amoled_fhd_plus_cmd>; + }; + + dsi_sim_vid_display: qcom,dsi-display@3 { + label = "dsi_sim_vid_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sim_vid>; + }; + + dsi_dual_sim_vid_display: qcom,dsi-display@4 { + label = "dsi_dual_sim_vid_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0 1>; + qcom,dsi-phy-num = <0 1>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_dual_sim_vid>; + }; + + dsi_sim_cmd_display: qcom,dsi-display@5 { + label = "dsi_sim_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = 
"src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sim_cmd>; + }; + + dsi_dual_sim_cmd_display: qcom,dsi-display@6 { + label = "dsi_dual_sim_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0 1>; + qcom,dsi-phy-num = <0 1>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_dual_sim_cmd>; + }; + + dsi_sim_dsc_375_cmd_display: qcom,dsi-display@7 { + label = "dsi_sim_dsc_375_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sim_dsc_375_cmd>; + }; + + dsi_dual_sim_dsc_375_cmd_display: qcom,dsi-display@8 { + label = "dsi_dual_sim_dsc_375_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0 1>; + qcom,dsi-phy-num = <0 1>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_dual_sim_dsc_375_cmd>; + }; + + sde_dsi: qcom,dsi-display { + compatible = "qcom,dsi-display"; + + qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; + qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + + clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>, + <&mdss_dsi0_pll PCLK_MUX_0_CLK>, + <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>, + <&mdss_dsi1_pll PCLK_MUX_1_CLK>; + clock-names = "src_byte_clk0", "src_pixel_clk0", + "src_byte_clk1", "src_pixel_clk1"; + + pinctrl-names = "panel_active", "panel_suspend"; + pinctrl-0 = <&sde_te_active>; + pinctrl-1 = <&sde_te_suspend>; + + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&pm6150l_gpios 9 0>; + + vddio-supply = <&pm6150_l13>; + vdda-3p3-supply = <&pm6150_l18>; + lab-supply = <&lcdb_ldo_vreg>; + ibb-supply = <&lcdb_ncp_vreg>; + + qcom,dsi-display-list = + <&dsi_sw43404_amoled_video_display + &dsi_sw43404_amoled_cmd_display + &dsi_sw43404_amoled_fhd_plus_cmd_display + &dsi_sim_vid_display + &dsi_dual_sim_vid_display + &dsi_sim_cmd_display + &dsi_dual_sim_cmd_display + 
&dsi_sim_dsc_375_cmd_display + &dsi_dual_sim_dsc_375_cmd_display>; + }; + + sde_wb: qcom,wb-display@0 { + compatible = "qcom,wb-display"; + cell-index = <0>; + label = "wb_display"; + }; +}; + +&sde_dp { + qcom,dp-usbpd-detection = <&pm6150_pdphy>; +}; + +&mdss_mdp { + connectors = <&sde_wb &sde_dsi>; +}; + +&dsi_sw43404_amoled_video { + qcom,mdss-dsi-t-clk-post = <0x0A>; + qcom,mdss-dsi-t-clk-pre = <0x21>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 13 05 04 1F 1E 05 + 05 03 02 04 00]; + qcom,display-topology = <1 1 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sw43404_amoled_cmd { + qcom,mdss-dsi-t-clk-post = <0x0A>; + qcom,mdss-dsi-t-clk-pre = <0x21>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 13 05 04 1F 1E 05 + 05 03 02 04 00]; + qcom,display-topology = <1 1 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sw43404_amoled_fhd_plus_cmd { + qcom,mdss-dsi-t-clk-post = <0x0A>; + qcom,mdss-dsi-t-clk-pre = <0x21>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 13 05 04 1F 1E 05 + 05 03 02 04 00]; + qcom,display-topology = <1 1 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + + +&dsi_sim_vid { + qcom,mdss-dsi-t-clk-post = <0x0D>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 1C 07 07 23 21 07 + 07 05 02 04 00]; + qcom,display-topology = <1 0 1>, + <2 0 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_dual_sim_vid { + qcom,mdss-dsi-t-clk-post = <0x0D>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 1C 07 07 23 21 07 + 07 05 02 04 00]; + qcom,display-topology = <2 0 2>, + <1 0 2>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sim_cmd { + qcom,mdss-dsi-t-clk-post = <0x0C>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + qcom,ulps-enabled; + 
qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 + 07 04 02 04 00]; + qcom,display-topology = <1 1 1>, + <2 2 1>; + qcom,default-topology-index = <1>; + qcom,panel-roi-alignment = <720 40 720 40 720 40>; + qcom,partial-update-enabled = "single_roi"; + }; + + timing@1{ + qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 + 07 04 02 04 00]; + qcom,display-topology = <1 1 1>, + <2 2 1>; + qcom,default-topology-index = <1>; + qcom,panel-roi-alignment = <540 40 540 40 540 40>; + qcom,partial-update-enabled = "single_roi"; + }; + + timing@2{ + qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 + 07 04 02 04 00]; + qcom,display-topology = <1 1 1>, + <2 2 1>; + qcom,default-topology-index = <1>; + qcom,panel-roi-alignment = <360 40 360 40 360 40>; + qcom,partial-update-enabled = "single_roi"; + }; + }; +}; + +&dsi_dual_sim_cmd { + qcom,mdss-dsi-t-clk-post = <0x0D>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,ulps-enabled; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09 + 09 06 02 04 00]; + qcom,display-topology = <2 0 2>; + qcom,default-topology-index = <0>; + }; + timing@1{ + qcom,mdss-dsi-panel-phy-timings = [00 1C 07 07 23 21 07 + 07 05 02 04 00]; + qcom,display-topology = <2 0 2>, + <1 0 2>; + qcom,default-topology-index = <0>; + }; + timing@2{ + qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 + 06 04 02 04 00]; + qcom,display-topology = <2 0 2>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sim_dsc_375_cmd { + qcom,mdss-dsi-t-clk-post = <0x0D>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,ulps-enabled; + qcom,mdss-dsi-display-timings { + timing@0 { /* 1080p */ + qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 + 07 04 02 04 00]; + qcom,display-topology = <1 1 1>; + qcom,default-topology-index = <0>; + }; + timing@1 { /* qhd */ + qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1F 05 + 05 03 02 04 00]; + qcom,display-topology 
= <1 1 1>, + <2 2 1>, /* dsc merge */ + <2 1 1>; /* 3d mux */ + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_dual_sim_dsc_375_cmd { + qcom,mdss-dsi-t-clk-post = <0x0D>; + qcom,mdss-dsi-t-clk-pre = <0x2D>; + qcom,ulps-enabled; + qcom,mdss-dsi-display-timings { + timing@0 { /* qhd */ + qcom,mdss-dsi-panel-phy-timings = [00 1C 07 07 23 21 07 + 07 05 02 04 00]; + qcom,display-topology = <2 2 2>; + qcom,default-topology-index = <0>; + }; + timing@1 { /* 4k */ + qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 + 06 04 02 04 00]; + qcom,display-topology = <2 2 2>; + qcom,default-topology-index = <0>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-sde-pll.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..19070c4c3f6b8cc1b4fd3ed0feca07b2d5b91813 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-sde-pll.dtsi @@ -0,0 +1,110 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94a00 { + compatible = "qcom,mdss_dsi_pll_10nm"; + label = "MDSS DSI 0 PLL"; + cell-index = <0>; + #clock-cells = <1>; + reg = <0xae94a00 0x1e0>, + <0xae94400 0x800>, + <0xaf03000 0x8>; + reg-names = "pll_base", "phy_base", "gdsc_base"; + clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>; + clock-names = "iface_clk"; + clock-rate = <0>; + gdsc-supply = <&mdss_core_gdsc>; + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96a00 { + compatible = "qcom,mdss_dsi_pll_10nm"; + label = "MDSS DSI 1 PLL"; + cell-index = <1>; + #clock-cells = <1>; + reg = <0xae96a00 0x1e0>, + <0xae96400 0x800>, + <0xaf03000 0x8>; + reg-names = "pll_base", "phy_base", "gdsc_base"; + clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>; + clock-names = "iface_clk"; + clock-rate = <0>; + gdsc-supply = <&mdss_core_gdsc>; + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dp_pll: qcom,mdss_dp_pll@ae90000 { + status = "disabled"; + compatible = "qcom,mdss_dp_pll_10nm"; + label = "MDSS DP PLL"; + cell-index = <0>; + #clock-cells = <1>; + + reg = <0x088ea000 0x200>, + <0x088eaa00 0x200>, + <0x088ea200 0x200>, + <0x088ea600 0x200>, + <0xaf03000 0x8>; + reg-names = "pll_base", "phy_base", "ln_tx0_base", + "ln_tx1_base", "gdsc_base"; + + gdsc-supply = <&mdss_core_gdsc>; + + clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>, + <&clock_gcc 
GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; + clock-names = "iface_clk", "ref_clk_src", "ref_clk", + "cfg_ahb_clk", "pipe_clk"; + clock-rate = <0>; + + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + + }; + }; + +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..d32e93de4aad5631a857a8ccbf7988d355f3a3b5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi @@ -0,0 +1,670 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + mdss_mdp: qcom,mdss_mdp@ae00000 { + compatible = "qcom,sde-kms"; + reg = <0x0ae00000 0x84208>, + <0x0aeb0000 0x2008>, + <0x0aeac000 0x214>; + reg-names = "mdp_phys", + "vbif_phys", + "regdma_phys"; + + clocks = + <&clock_gcc GCC_DISP_AHB_CLK>, + <&clock_gcc GCC_DISP_HF_AXI_CLK>, + <&clock_gcc GCC_DISP_SF_AXI_CLK>, + <&clock_dispcc DISP_CC_MDSS_AHB_CLK>, + <&clock_dispcc DISP_CC_MDSS_MDP_CLK>, + <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>, + <&clock_dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&clock_dispcc DISP_CC_MDSS_ROT_CLK>; + clock-names = "gcc_iface", "gcc_bus", "gcc_nrt_bus", + "iface_clk", "core_clk", "vsync_clk", + "lut_clk", "rot_clk"; + clock-rate = <0 0 0 0 300000000 19200000 200000000 + 200000000>; + clock-max-rate = <0 0 0 0 430000000 19200000 430000000 + 430000000>; + + sde-vdd-supply = <&mdss_core_gdsc>; + + /* interrupt config */ + interrupts = <0 83 0>; + interrupt-controller; + #interrupt-cells = <1>; + iommus = <&apps_smmu 0x800 0x440>; + + #address-cells = <1>; + #size-cells = <0>; + + #power-domain-cells = <0>; + + /* hw blocks */ + qcom,sde-off = <0x1000>; + qcom,sde-len = <0x45c>; + + qcom,sde-ctl-off = <0x2000 0x2200 0x2400 + 0x2600 0x2800 0x2a00>; + qcom,sde-ctl-size = <0x1e0>; + qcom,sde-ctl-display-pref = "primary", "none", "none", + "none", "none"; + + qcom,sde-mixer-off = <0x45000 0x46000 0x47000 + 0x48000 0x0 0x0>; + qcom,sde-mixer-size = <0x320>; + qcom,sde-mixer-display-pref = "primary", "primary", "none", + "none", "none", "none"; + + qcom,sde-mixer-cwb-pref = "none", "none", "cwb", + "cwb", "none", "none"; + + qcom,sde-dspp-top-off = <0x1300>; + qcom,sde-dspp-top-size = <0x80>; + qcom,sde-dspp-off = <0x55000 0x57000>; + qcom,sde-dspp-size = <0x1800>; + + qcom,sde-dest-scaler-top-off = <0x00061000>; + qcom,sde-dest-scaler-top-size = <0x1c>; + qcom,sde-dest-scaler-off = <0x800 0x1000>; + qcom,sde-dest-scaler-size = <0xa0>; + + qcom,sde-wb-off = <0x66000>; + qcom,sde-wb-size = <0x2c8>; + qcom,sde-wb-xin-id = <6>; + qcom,sde-wb-id 
= <2>; + qcom,sde-wb-clk-ctrl = <0x3b8 24>; + + qcom,sde-intf-off = <0x6b000 0x6b800 + 0x6c000 0x6c800>; + qcom,sde-intf-size = <0x2b8>; + qcom,sde-intf-type = "dp", "dsi", "dsi", "dp"; + + qcom,sde-pp-off = <0x71000 0x71800 + 0x72000 0x72800>; + qcom,sde-pp-slave = <0x0 0x0 0x0 0x0>; + qcom,sde-pp-size = <0xd4>; + qcom,sde-pp-merge-3d-id = <0x0 0x0 0x1 0x1>; + + qcom,sde-merge-3d-off = <0x84000 0x84100>; + qcom,sde-merge-3d-size = <0x100>; + + qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0>; + + qcom,sde-cdm-off = <0x7a200>; + qcom,sde-cdm-size = <0x224>; + + qcom,sde-dsc-off = <0x81000 0x81400>; + qcom,sde-dsc-size = <0x140>; + + qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>; + qcom,sde-dither-version = <0x00010000>; + qcom,sde-dither-size = <0x20>; + + qcom,sde-sspp-type = "vig", "vig", "dma", "dma", "dma"; + + qcom,sde-sspp-off = <0x5000 0x7000 0x25000 0x27000 + 0x29000>; + qcom,sde-sspp-src-size = <0x1f0>; + + qcom,sde-sspp-xin-id = <0 4 1 5 9>; + qcom,sde-sspp-excl-rect = <1 1 1 1 1>; + qcom,sde-sspp-smart-dma-priority = <4 5 1 2 3>; + qcom,sde-smart-dma-rev = "smart_dma_v2p5"; + + qcom,sde-mixer-pair-mask = <2 1 4 3 0 0>; + + qcom,sde-mixer-blend-op-off = <0x20 0x38 0x50 0x68 0x80 0x98 + 0xb0 0xc8 0xe0 0xf8 0x110>; + + qcom,sde-max-per-pipe-bw-kbps = <4500000 + 4500000 4500000 + 4500000 4500000>; + + /* offsets are relative to "mdp_phys + qcom,sde-off */ + qcom,sde-sspp-clk-ctrl = + <0x2ac 0>, <0x2b4 0>, <0x2ac 8>, <0x2b4 8>, + <0x2bc 8>; + qcom,sde-sspp-csc-off = <0x1a00>; + qcom,sde-csc-type = "csc-10bit"; + qcom,sde-qseed-type = "qseedv3lite"; + qcom,sde-sspp-qseed-off = <0xa00>; + qcom,sde-mixer-linewidth = <2560>; + qcom,sde-sspp-linewidth = <2880>; + qcom,sde-wb-linewidth = <2880>; + qcom,sde-mixer-blendstages = <0xb>; + qcom,sde-highest-bank-bit = <0x1>; + qcom,sde-ubwc-version = <0x200>; + qcom,sde-panic-per-pipe; + qcom,sde-has-cdp; + qcom,sde-has-src-split; + qcom,sde-pipe-order-version = <0x1>; + qcom,sde-has-dim-layer; + qcom,sde-has-idle-pc; + 
qcom,sde-has-dest-scaler; + qcom,sde-max-dest-scaler-input-linewidth = <2048>; + qcom,sde-max-dest-scaler-output-linewidth = <2560>; + qcom,sde-max-bw-low-kbps = <12800000>; + qcom,sde-max-bw-high-kbps = <12800000>; + qcom,sde-min-core-ib-kbps = <2400000>; + qcom,sde-min-llcc-ib-kbps = <800000>; + qcom,sde-min-dram-ib-kbps = <800000>; + qcom,sde-dram-channels = <2>; + qcom,sde-num-nrt-paths = <0>; + qcom,sde-dspp-ad-version = <0x00040000>; + qcom,sde-dspp-ad-off = <0x28000 0x27000>; + + qcom,sde-vbif-off = <0>; + qcom,sde-vbif-size = <0x1040>; + qcom,sde-vbif-id = <0>; + qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>; + qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>; + + qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>; + qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>; + + /* macrotile & macrotile-qseed has the same configs */ + qcom,sde-danger-lut = <0x0000000f 0x0000ffff + 0x00000000 0x00000000 0x0000ffff>; + + qcom,sde-safe-lut-linear = <0 0xfff8>; + qcom,sde-safe-lut-macrotile = <0 0xf000>; + /* same as safe-lut-macrotile */ + qcom,sde-safe-lut-macrotile-qseed = <0 0xf000>; + qcom,sde-safe-lut-nrt = <0 0xffff>; + qcom,sde-safe-lut-cwb = <0 0xffff>; + + qcom,sde-qos-lut-linear = <0 0x00112222 0x22223357>; + qcom,sde-qos-lut-macrotile = <0 0x00112233 0x44556677>; + qcom,sde-qos-lut-macrotile-qseed = <0 0x00112233 0x66777777>; + qcom,sde-qos-lut-nrt = <0 0x00000000 0x00000000>; + qcom,sde-qos-lut-cwb = <0 0x75300000 0x00000000>; + + qcom,sde-cdp-setting = <1 1>, <1 0>; + + qcom,sde-qos-cpu-mask = <0x3>; + qcom,sde-qos-cpu-dma-latency = <300>; + + /* offsets are relative to "mdp_phys + qcom,sde-off */ + + qcom,sde-reg-dma-off = <0>; + qcom,sde-reg-dma-version = <0x00010001>; + qcom,sde-reg-dma-trigger-off = <0x119c>; + + qcom,sde-secure-sid-mask = <0x4400801>; + + qcom,sde-sspp-vig-blocks { + qcom,sde-vig-csc-off = <0x1a00>; + qcom,sde-vig-qseed-off = <0xa00>; + qcom,sde-vig-qseed-size = <0xa0>; + qcom,sde-vig-gamut = <0x1d00 0x00050000>; + qcom,sde-vig-igc = <0x1d00 
0x00050000>; + qcom,sde-vig-inverse-pma; + }; + + qcom,sde-sspp-dma-blocks { + dgm@0 { + qcom,sde-dma-igc = <0x400 0x00050000>; + qcom,sde-dma-gc = <0x600 0x00050000>; + qcom,sde-dma-inverse-pma; + qcom,sde-dma-csc-off = <0x200>; + }; + dgm@1 { + qcom,sde-dma-igc = <0x1400 0x00050000>; + qcom,sde-dma-gc = <0x600 0x00050000>; + qcom,sde-dma-inverse-pma; + qcom,sde-dma-csc-off = <0x1200>; + }; + }; + + qcom,sde-dspp-blocks { + qcom,sde-dspp-igc = <0x0 0x00030001>; + qcom,sde-dspp-hsic = <0x800 0x00010007>; + qcom,sde-dspp-memcolor = <0x880 0x00010007>; + qcom,sde-dspp-hist = <0x800 0x00010007>; + qcom,sde-dspp-sixzone= <0x900 0x00010007>; + qcom,sde-dspp-vlut = <0xa00 0x00010008>; + qcom,sde-dspp-gamut = <0x1000 0x00040001>; + qcom,sde-dspp-pcc = <0x1700 0x00040000>; + qcom,sde-dspp-gc = <0x17c0 0x00010008>; + qcom,sde-dspp-dither = <0x82c 0x00010007>; + }; + + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "sde-vdd"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + + smmu_sde_sec: qcom,smmu_sde_sec_cb { + compatible = "qcom,smmu_sde_sec"; + iommus = <&apps_smmu 0x801 0x440>; + }; + + /* data and reg bus scale settings */ + qcom,sde-data-bus { + qcom,msm-bus,name = "mdss_sde"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <22 512 0 0>, <23 512 0 0>, + <22 512 0 6400000>, <23 512 0 6400000>, + <22 512 0 6400000>, <23 512 0 6400000>; + }; + + qcom,sde-reg-bus { + qcom,msm-bus,name = "mdss_reg"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 590 0 0>, + <1 590 0 76800>, + <1 590 0 150000>, + <1 590 0 300000>; + }; + }; + + sde_rscc: qcom,sde_rscc@af20000 { + cell-index = <0>; + compatible = "qcom,sde-rsc"; + reg = <0xaf20000 0x1c44>, + <0xaf30000 0x3fd4>; + reg-names = "drv", 
"wrapper"; + qcom,sde-rsc-version = <2>; + status = "disabled"; + + vdd-supply = <&mdss_core_gdsc>; + clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>, + <&clock_dispcc DISP_CC_MDSS_NON_GDSC_AHB_CLK>, + <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>; + clock-names = "vsync_clk", "gdsc_clk", "iface_clk"; + clock-rate = <0 0 0>; + + qcom,sde-dram-channels = <2>; + + mboxes = <&disp_rsc 0>; + mbox-names = "disp_rsc"; + + /* data and reg bus scale settings */ + qcom,sde-data-bus { + qcom,msm-bus,name = "disp_rsc_mnoc"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <20003 20515 0 0>, <20004 20515 0 0>, + <20003 20515 0 6400000>, <20004 20515 0 6400000>, + <20003 20515 0 6400000>, <20004 20515 0 6400000>; + }; + + qcom,sde-llcc-bus { + qcom,msm-bus,name = "disp_rsc_llcc"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20001 20513 0 0>, + <20001 20513 0 6400000>, + <20001 20513 0 6400000>; + }; + + qcom,sde-ebi-bus { + qcom,msm-bus,name = "disp_rsc_ebi"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20000 20512 0 0>, + <20000 20512 0 6400000>, + <20000 20512 0 6400000>; + }; + }; + + mdss_rotator: qcom,mdss_rotator@ae00000 { + compatible = "qcom,sde_rotator"; + reg = <0x0ae00000 0xac000>, + <0x0aeb8000 0x3000>; + reg-names = "mdp_phys", + "rot_vbif_phys"; + + #list-cells = <1>; + + qcom,mdss-rot-mode = <1>; + qcom,mdss-highest-bank-bit = <0x1>; + + /* Bus Scale Settings */ + qcom,msm-bus,name = "mdss_rotator"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <25 512 0 0>, + <25 512 0 6400000>, + <25 512 0 6400000>; + + rot-vdd-supply = <&mdss_core_gdsc>; + qcom,supply-names = "rot-vdd"; + + clocks = + <&clock_gcc GCC_DISP_AHB_CLK>, + <&clock_gcc GCC_DISP_SF_AXI_CLK>, + <&clock_dispcc 
DISP_CC_MDSS_AHB_CLK>, + <&clock_dispcc DISP_CC_MDSS_ROT_CLK>; + clock-names = "gcc_iface", "gcc_bus", + "iface_clk", "rot_clk"; + + interrupt-parent = <&mdss_mdp>; + interrupts = <2 0>; + + power-domains = <&mdss_mdp>; + + /* Offline rotator QoS setting */ + qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>; + qcom,mdss-rot-vbif-memtype = <3 3>; + qcom,mdss-rot-cdp-setting = <1 1>; + qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>; + qcom,mdss-rot-danger-lut = <0x0 0x0>; + qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>; + + qcom,mdss-rot-qos-cpu-mask = <0xf>; + qcom,mdss-rot-qos-cpu-dma-latency = <75>; + + qcom,mdss-default-ot-rd-limit = <32>; + qcom,mdss-default-ot-wr-limit = <32>; + + qcom,mdss-sbuf-headroom = <20>; + + cache-slice-names = "rotator"; + cache-slices = <&llcc 4>; + + /* reg bus scale settings */ + rot_reg: qcom,rot-reg-bus { + qcom,msm-bus,name = "mdss_rot_reg"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 590 0 0>, + <1 590 0 76800>; + }; + + smmu_rot_unsec: qcom,smmu_rot_unsec_cb { + compatible = "qcom,smmu_sde_rot_unsec"; + iommus = <&apps_smmu 0x1020 0x0>; + }; + + smmu_rot_sec: qcom,smmu_rot_sec_cb { + compatible = "qcom,smmu_sde_rot_sec"; + iommus = <&apps_smmu 0x1021 0x0>; + }; + }; + + mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 { + compatible = "qcom,dsi-ctrl-hw-v2.3"; + label = "dsi-ctrl-0"; + cell-index = <0>; + reg = <0xae94000 0x400>, + <0xaf08000 0x4>; + reg-names = "dsi_ctrl", "disp_cc_base"; + interrupt-parent = <&mdss_mdp>; + interrupts = <4 0>; + vdda-1p2-supply = <&pm6150l_l3>; + clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, + <&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>, + <&clock_dispcc DISP_CC_MDSS_ESC0_CLK>; + clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk", + "pixel_clk", "pixel_clk_rcg", + "esc_clk"; + + qcom,ctrl-supply-entries { + 
#address-cells = <1>; + #size-cells = <0>; + + qcom,ctrl-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-1p2"; + qcom,supply-min-voltage = <1232000>; + qcom,supply-max-voltage = <1232000>; + qcom,supply-enable-load = <21800>; + qcom,supply-disable-load = <0>; + }; + }; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-name = "refgen"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 { + compatible = "qcom,dsi-ctrl-hw-v2.3"; + label = "dsi-ctrl-1"; + cell-index = <1>; + reg = <0xae96000 0x400>, + <0xaf08000 0x4>; + reg-names = "dsi_ctrl", "disp_cc_base"; + interrupt-parent = <&mdss_mdp>; + interrupts = <5 0>; + vdda-1p2-supply = <&pm6150l_l3>; + clocks = <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK>, + <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>, + <&clock_dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, + <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK>, + <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>, + <&clock_dispcc DISP_CC_MDSS_ESC1_CLK>; + clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk", + "pixel_clk", "pixel_clk_rcg", + "esc_clk"; + + qcom,ctrl-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,ctrl-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-1p2"; + qcom,supply-min-voltage = <1232000>; + qcom,supply-max-voltage = <1232000>; + qcom,supply-enable-load = <21800>; + qcom,supply-disable-load = <0>; + }; + }; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-name = "refgen"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 { + compatible = "qcom,dsi-phy-v3.0"; + label = "dsi-phy-0"; + cell-index = 
<0>; + reg = <0xae94400 0x7c0>; + reg-names = "dsi_phy"; + gdsc-supply = <&mdss_core_gdsc>; + vdda-0p9-supply = <&pm6150_l4>; + qcom,platform-strength-ctrl = [55 03 + 55 03 + 55 03 + 55 03 + 55 00]; + qcom,platform-lane-config = [00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 80]; + qcom,platform-regulator-settings = [1d 1d 1d 1d 1d]; + qcom,phy-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,phy-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-0p9"; + qcom,supply-min-voltage = <920000>; + qcom,supply-max-voltage = <920000>; + qcom,supply-enable-load = <36000>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dsi_phy1: qcom,mdss_dsi_phy1@ae96400 { + compatible = "qcom,dsi-phy-v3.0"; + label = "dsi-phy-1"; + cell-index = <1>; + reg = <0xae96400 0x7c0>; + reg-names = "dsi_phy"; + gdsc-supply = <&mdss_core_gdsc>; + vdda-0p9-supply = <&pm6150_l4>; + qcom,platform-strength-ctrl = [55 03 + 55 03 + 55 03 + 55 03 + 55 00]; + qcom,platform-lane-config = [00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 80]; + qcom,platform-regulator-settings = [1d 1d 1d 1d 1d]; + qcom,phy-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,phy-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-0p9"; + qcom,supply-min-voltage = <920000>; + qcom,supply-max-voltage = <920000>; + qcom,supply-enable-load = <36000>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + sde_dp: qcom,dp_display@0{ + status = "disabled"; + cell-index = <0>; + compatible = "qcom,dp-display"; + + vdda-1p2-supply = <&pm6150l_l3>; + vdda-0p9-supply = <&pm6150_l4>; + + reg = <0xae90000 0x0dc>, + <0xae90200 0x0c0>, + <0xae90400 0x508>, + <0xae90a00 0x094>, + <0x88eaa00 0x200>, + <0x88ea200 0x200>, + <0x88ea600 0x200>, + <0xaf02000 0x1a0>, + <0x780000 0x621c>, + <0x88ea040 0x10>, + <0x88e8000 0x20>, + <0x0aee1000 0x034>, + <0xae91000 0x094>; + /* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */ + reg-names = "dp_ahb", "dp_aux", 
"dp_link", + "dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1", + "dp_mmss_cc", "qfprom_physical", "dp_pll", + "usb3_dp_com", "hdcp_physical", "dp_p1"; + + interrupt-parent = <&mdss_mdp>; + interrupts = <12 0>; + + qcom,phy-version = <0x420>; + qcom,aux-cfg0-settings = [20 00]; + qcom,aux-cfg1-settings = [24 13]; + qcom,aux-cfg2-settings = [28 24]; + qcom,aux-cfg3-settings = [2c 00]; + qcom,aux-cfg4-settings = [30 0a]; + qcom,aux-cfg5-settings = [34 26]; + qcom,aux-cfg6-settings = [38 0a]; + qcom,aux-cfg7-settings = [3c 03]; + qcom,aux-cfg8-settings = [40 b7]; + qcom,aux-cfg9-settings = [44 03]; + + qcom,max-pclk-frequency-khz = <675000>; + + qcom,mst-enable; + + qcom,ctrl-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,ctrl-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-1p2"; + qcom,supply-min-voltage = <1200000>; + qcom,supply-max-voltage = <1200000>; + qcom,supply-enable-load = <21800>; + qcom,supply-disable-load = <0>; + }; + }; + + qcom,phy-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,phy-supply-entry@0 { + reg = <0>; + qcom,supply-name = "vdda-0p9"; + qcom,supply-min-voltage = <880000>; + qcom,supply-max-voltage = <880000>; + qcom,supply-enable-load = <36000>; + qcom,supply-disable-load = <0>; + }; + }; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-name = "refgen"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..fe5a89d4ced9d496674b29b7d9548d9097d70782 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal-overlay.dtsi @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include + +&thermal_zones { + pm6150-tz { + cooling-maps { + trip0_bat { + trip = <&pm6150_trip0>; + cooling-device = + <&pm6150_charger (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip1_bat { + trip = <&pm6150_trip1>; + cooling-device = + <&pm6150_charger THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + pm6150l-tz { + cooling-maps { + trip0_cpu0 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU0 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu1 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU1 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu2 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU2 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu3 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU3 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu4 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU4 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu5 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU5 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu6 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU6 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip0_cpu7 { + trip = <&pm6150l_trip0>; + cooling-device = + <&CPU7 (THERMAL_MAX_LIMIT-1) + (THERMAL_MAX_LIMIT-1)>; + }; + trip1_cpu1 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU1 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu2 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU2 
THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu3 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU3 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu4 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU4 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu5 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU5 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu6 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + trip1_cpu7 { + trip = <&pm6150l_trip1>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + pm6150-vbat-lvl0 { + cooling-maps { + vbat_cpu6 { + trip = <&vbat_lvl0>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + vbat_cpu7 { + trip = <&vbat_lvl0>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + pm6150-ibat-lvl0 { + cooling-maps { + ibat_cpu6 { + trip = <&ibat_lvl0>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + ibat_cpu7 { + trip = <&ibat_lvl0>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + soc { + cooling-maps { + soc_cpu6 { + trip = <&soc_trip>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + soc_cpu7 { + trip = <&soc_trip>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi index de85b18cbd72d02793e45fe588a75a3f52a6a541..d60d108f7e097f08e1a01fd1081402aa2cb94ee3 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi @@ -13,6 +13,81 @@ #include +&clock_cpucc { + #address-cells = <1>; + #size-cells = <1>; + lmh_dcvs0: qcom,limits-dcvs@18358800 { + compatible = "qcom,msm-hw-limits"; + interrupts = ; + qcom,affinity = <0>; + reg = <0x18358800 0x1000>, + <0x18323000 
0x1000>; + #thermal-sensor-cells = <0>; + }; + + lmh_dcvs1: qcom,limits-dcvs@18350800 { + compatible = "qcom,msm-hw-limits"; + interrupts = ; + qcom,affinity = <1>; + reg = <0x18350800 0x1000>, + <0x18325800 0x1000>; + #thermal-sensor-cells = <0>; + }; +}; + +&soc { + qmi-tmd-devices { + compatible = "qcom,qmi-cooling-devices"; + + modem { + qcom,instance-id = <0x0>; + + modem_pa: modem_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem_proc: modem_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem_current: modem_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem_skin: modem_skin { + qcom,qmi-dev-name = "modem_skin"; + #cooling-cells = <2>; + }; + + modem_vdd: modem_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + adsp { + qcom,instance-id = <0x1>; + + adsp_vdd: adsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + cdsp { + qcom,instance-id = <0x43>; + + cdsp_vdd: cdsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; +}; + &thermal_zones { aoss-0-usr { polling-delay-passive = <0>; @@ -25,10 +100,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu0-usr { + cpu-0-0-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-governor = "user_space"; @@ -39,10 +119,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu1-usr { + cpu-0-1-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-governor = "user_space"; @@ -53,10 +138,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu2-usr { + cpu-0-2-usr { polling-delay-passive = <0>; 
polling-delay = <0>; thermal-governor = "user_space"; @@ -67,10 +157,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu3-usr { + cpu-0-3-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 4>; @@ -81,10 +176,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu4-usr { + cpu-0-4-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 5>; @@ -95,10 +195,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc0-cpu5-usr { + cpu-0-5-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 6>; @@ -109,6 +214,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -123,6 +233,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -137,10 +252,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc1-cpu0-usr { + cpu-1-0-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 9>; @@ -151,10 +271,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc1-cpu1-usr { + cpu-1-1-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 10>; @@ -165,10 +290,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc1-cpu2-usr { + cpu-1-2-usr { polling-delay-passive = <0>; polling-delay = 
<0>; thermal-sensors = <&tsens0 11>; @@ -179,10 +309,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - apc1-cpu3-usr { + cpu-1-3-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 12>; @@ -193,6 +328,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -207,6 +347,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -221,6 +366,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -235,6 +385,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -249,10 +404,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - video-usr { + audio-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens1 2>; @@ -263,6 +423,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -277,6 +442,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -291,6 +461,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -305,10 +480,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - cmpss-usr { + mdm-core-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens1 6>; @@ -319,10 
+499,15 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; - mdm-core-usr { + mdm-dsp-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens1 7>; @@ -333,6 +518,11 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -347,6 +537,30 @@ hysteresis = <1000>; type = "passive"; }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + }; + + video-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 9>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + reset-mon-cfg { + temperature = <115000>; + hysteresis = <5000>; + type = "passive"; + }; }; }; @@ -447,4 +661,878 @@ }; }; }; + + lmh-dcvs-00 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&lmh_dcvs0>; + + trips { + active-config { + temperature = <95000>; + hysteresis = <30000>; + type = "passive"; + }; + }; + }; + + lmh-dcvs-01 { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&lmh_dcvs1>; + + trips { + active-config { + temperature = <95000>; + hysteresis = <30000>; + type = "passive"; + }; + }; + }; + + gpuss-max-step { + polling-delay-passive = <10>; + polling-delay = <0>; + thermal-governor = "step_wise"; + trips { + gpu_trip: gpu-trip { + temperature = <95000>; + hysteresis = <0>; + type = "passive"; + }; + }; + }; + + cpu-0-max-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + trips { + silver-trip { + temperature = <120000>; + hysteresis = <0>; + type = "passive"; + }; + }; + }; + + cpu-1-max-step { + polling-delay-passive = <0>; + polling-delay = <0>; + 
thermal-governor = "step_wise"; + trips { + gold-trip { + temperature = <120000>; + hysteresis = <0>; + type = "passive"; + }; + }; + }; + + cpu-0-0-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 1>; + trips { + cpu0_config: cpu0-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cpu0_config>; + cooling-device = + <&CPU0 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-0-1-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 2>; + trips { + cpu1_config: cpu1-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu1_cdev { + trip = <&cpu1_config>; + cooling-device = + <&CPU1 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-0-2-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 3>; + trips { + cpu2_config: cpu2-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu2_cdev { + trip = <&cpu2_config>; + cooling-device = + <&CPU2 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-0-3-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 4>; + trips { + cpu3_config: cpu3-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu3_cdev { + trip = <&cpu3_config>; + cooling-device = + <&CPU3 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-0-4-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 5>; + trips { + cpu4_config: cpu4-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + 
cooling-maps { + cpu4_cdev { + trip = <&cpu4_config>; + cooling-device = + <&CPU4 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-0-5-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 6>; + trips { + cpu5_config: cpu5-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu5_cdev { + trip = <&cpu5_config>; + cooling-device = + <&CPU5 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-1-0-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 9>; + trips { + cpu6_0_config: cpu6-0-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu6_cdev { + trip = <&cpu6_0_config>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-1-1-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 10>; + trips { + cpu6_1_config: cpu6-1-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu6_cdev { + trip = <&cpu6_1_config>; + cooling-device = + <&CPU6 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-1-2-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 11>; + trips { + cpu7_0_config: cpu7-0-config { + temperature = <110000>; + hysteresis = <10000>; + type = "passive"; + }; + }; + cooling-maps { + cpu7_cdev { + trip = <&cpu7_0_config>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + cpu-1-3-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&tsens0 12>; + trips { + cpu7_1_config: cpu7-1-config { + temperature = <110000>; + hysteresis = <10000>; + type = 
"passive"; + }; + }; + cooling-maps { + cpu7_cdev { + trip = <&cpu7_1_config>; + cooling-device = + <&CPU7 THERMAL_MAX_LIMIT + THERMAL_MAX_LIMIT>; + }; + }; + }; + + aoss-0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 0>; + tracks-low; + trips { + aoss0_trip: aoss0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&aoss0_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&aoss0_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + cpu-0-0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 1>; + tracks-low; + trips { + cpu_0_0_trip: cpu-0-0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu_0_0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + cpu-1-0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = 
"low_limits_floor"; + thermal-sensors = <&tsens0 9>; + tracks-low; + trips { + cpu_1_0_trip: cpu-1-0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu_1_0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + gpuss-0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 13>; + tracks-low; + trips { + gpuss_0_trip: gpuss-0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&gpuss_0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + cwlan-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 1>; + tracks-low; + trips { + cwlan_trip: cwlan-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&cwlan_trip>; + 
cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&cwlan_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&cwlan_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cwlan_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&cwlan_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cwlan_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cwlan_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + audio-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 2>; + tracks-low; + trips { + audio_trip: audio-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&audio_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&audio_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&audio_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&audio_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&audio_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&audio_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&audio_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + ddr-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 3>; + tracks-low; + trips { + ddr_trip: ddr-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&ddr_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&ddr_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + 
trip = <&ddr_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + q6-hvx-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 4>; + tracks-low; + trips { + q6_hvx_trip: q6-hvx-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&q6_hvx_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + camera-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 5>; + tracks-low; + trips { + camera_trip: camera-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&camera_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&camera_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + mdm-core-lowf { + 
polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 6>; + tracks-low; + trips { + mdm_core_trip: mdm-core-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&mdm_core_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + mdm-dsp-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 7>; + tracks-low; + trips { + mdm_dsp_trip: mdm-dsp-lowf-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&mdm_dsp_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + npu-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 8>; + tracks-low; + trips { + npu_trip: npu-trip { + temperature = <5000>; + hysteresis = <5000>; + type 
= "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&npu_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&npu_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&npu_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&npu_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&npu_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&npu_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&npu_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; + + video-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 9>; + tracks-low; + trips { + video_trip: video-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_cdev { + trip = <&video_trip>; + cooling-device = <&CPU0 2 2>; + }; + cpu1_cdev { + trip = <&video_trip>; + cooling-device = <&CPU6 4 4>; + }; + cx_vdd_cdev { + trip = <&video_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&video_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + modem_vdd_cdev { + trip = <&video_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&video_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&video_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-usb.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-usb.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..7e6621765ccb9e3ce14bc2777af18c2b4d61147c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-usb.dtsi @@ -0,0 +1,366 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +&soc { + usb0: ssusb@a600000 { + compatible = "qcom,dwc-usb3-msm"; + reg = <0x0a600000 0x100000>; + reg-names = "core_base"; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + interrupts = <0 489 0>, <0 130 0>, <0 486 0>, <0 488 0>; + interrupt-names = "dp_hs_phy_irq", "pwr_event_irq", + "ss_phy_irq", "dm_hs_phy_irq"; + USB3_GDSC-supply = <&usb30_prim_gdsc>; + qcom,use-pdc-interrupts; + + clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>, + <&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, + <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>, + <&clock_gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, + <&clock_gcc GCC_USB30_PRIM_SLEEP_CLK>, + <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>; + clock-names = "core_clk", "iface_clk", "bus_aggr_clk", + "utmi_clk", "sleep_clk", "xo"; + + resets = <&clock_gcc GCC_USB30_PRIM_BCR>; + reset-names = "core_reset"; + + qcom,core-clk-rate = <133333333>; + qcom,core-clk-rate-hs = <66666667>; + qcom,num-gsi-evt-buffs = <0x3>; + qcom,gsi-reg-offset = + <0x0fc /* GSI_GENERAL_CFG */ + 0x110 /* GSI_DBL_ADDR_L */ + 0x120 /* GSI_DBL_ADDR_H */ + 0x130 /* GSI_RING_BASE_ADDR_L */ + 0x144 /* GSI_RING_BASE_ADDR_H */ + 0x1a4>; /* GSI_IF_STS */ + qcom,dwc-usb3-msm-tx-fifo-size = <21288>; + qcom,pm-qos-latency = <62>; + + qcom,msm-bus,name = "usb0"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <3>; + qcom,msm-bus,vectors-KBps = + /* suspend vote */ + , + , + , + + /* svs vote */ + , + , + , + + /* min vote */ + , + , + ; + + dwc3@a600000 { + compatible = "snps,dwc3"; + reg = 
<0x0a600000 0xcd00>; + interrupts = <0 133 0>; + usb-phy = <&qusb_phy0>, <&usb_qmp_dp_phy>; + linux,sysdev_is_parent; + snps,disable-clk-gating; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + snps,usb3_lpm_capable; + usb-core-id = <0>; + tx-fifo-resize; + maximum-speed = "super-speed"; + dr_mode = "otg"; + }; + + qcom,usbbam@a704000 { + compatible = "qcom,usb-bam-msm"; + reg = <0xa704000 0x17000>; + interrupts = <0 132 0>; + + qcom,usb-bam-fifo-baseaddr = <0x146a6000>; + qcom,usb-bam-num-pipes = <4>; + qcom,disable-clk-gating; + qcom,usb-bam-override-threshold = <0x4001>; + qcom,usb-bam-max-mbps-highspeed = <400>; + qcom,usb-bam-max-mbps-superspeed = <3600>; + qcom,reset-bam-on-connect; + + qcom,pipe0 { + label = "ssusb-qdss-in-0"; + qcom,usb-bam-mem-type = <2>; + qcom,dir = <1>; + qcom,pipe-num = <0>; + qcom,peer-bam = <0>; + qcom,peer-bam-physical-address = <0x6064000>; + qcom,src-bam-pipe-index = <0>; + qcom,dst-bam-pipe-index = <0>; + qcom,data-fifo-offset = <0x0>; + qcom,data-fifo-size = <0x1800>; + qcom,descriptor-fifo-offset = <0x1800>; + qcom,descriptor-fifo-size = <0x800>; + }; + }; + }; + + /* Primary USB port related QUSB2 PHY */ + qusb_phy0: qusb@88e2000 { + compatible = "qcom,qusb2phy-v2"; + reg = <0x088e2000 0x400>, + <0x00780200 0x4>, + <0x088e7014 0x4>; + reg-names = "qusb_phy_base", "efuse_addr", + "refgen_north_bg_reg_addr"; + + qcom,efuse-bit-pos = <25>; + qcom,efuse-num-bits = <3>; + vdd-supply = <&pm6150_l4>; + vdda18-supply = <&pm6150_l11>; + vdda33-supply = <&pm6150_l17>; + qcom,override-bias-ctrl2; + qcom,vdd-voltage-level = <0 880000 880000>; + qcom,qusb-phy-reg-offset = + <0x240 /* QUSB2PHY_PORT_TUNE1 */ + 0x1a0 /* QUSB2PHY_PLL_COMMON_STATUS_ONE */ + 0x210 /* QUSB2PHY_PWR_CTRL1 */ + 0x230 /* QUSB2PHY_INTR_CTRL */ + 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */ + 0x254 /* QUSB2PHY_TEST1 */ + 0x198 /* PLL_BIAS_CONTROL_2 */ + 0x27c /* QUSB2PHY_DEBUG_CTRL1 */ + 0x280 /* QUSB2PHY_DEBUG_CTRL2 */ + 0x2a0>; /* QUSB2PHY_STAT5 */ 
+ + qcom,qusb-phy-init-seq = + /* */ + <0x23 0x210 /* PWR_CTRL1 */ + 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */ + 0x7c 0x18c /* PLL_CLOCK_INVERTERS */ + 0x80 0x2c /* PLL_CMODE */ + 0x0a 0x184 /* PLL_LOCK_DELAY */ + 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */ + 0x40 0x194 /* PLL_BIAS_CONTROL_1 */ + 0x20 0x198 /* PLL_BIAS_CONTROL_2 */ + 0x21 0x214 /* PWR_CTRL2 */ + 0x00 0x220 /* IMP_CTRL1 */ + 0x58 0x224 /* IMP_CTRL2 */ + 0x30 0x240 /* TUNE1 */ + 0x29 0x244 /* TUNE2 */ + 0xca 0x248 /* TUNE3 */ + 0x04 0x24c /* TUNE4 */ + 0x03 0x250 /* TUNE5 */ + 0x00 0x23c /* CHG_CTRL2 */ + 0x22 0x210>; /* PWR_CTRL1 */ + + qcom,qusb-phy-host-init-seq = + /* */ + <0x23 0x210 /* PWR_CTRL1 */ + 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */ + 0x7c 0x18c /* PLL_CLOCK_INVERTERS */ + 0x80 0x2c /* PLL_CMODE */ + 0x0a 0x184 /* PLL_LOCK_DELAY */ + 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */ + 0x40 0x194 /* PLL_BIAS_CONTROL_1 */ + 0x20 0x198 /* PLL_BIAS_CONTROL_2 */ + 0x21 0x214 /* PWR_CTRL2 */ + 0x00 0x220 /* IMP_CTRL1 */ + 0x58 0x224 /* IMP_CTRL2 */ + 0x30 0x240 /* TUNE1 */ + 0x29 0x244 /* TUNE2 */ + 0xca 0x248 /* TUNE3 */ + 0x04 0x24c /* TUNE4 */ + 0x03 0x250 /* TUNE5 */ + 0x00 0x23c /* CHG_CTRL2 */ + 0x22 0x210>; /* PWR_CTRL1 */ + + phy_type= "utmi"; + clocks = <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + clock-names = "ref_clk_src", "cfg_ahb_clk"; + + resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>; + reset-names = "phy_reset"; + }; + + /* Primary USB port related QMP USB DP Combo PHY */ + usb_qmp_dp_phy: ssphy@88e8000 { + compatible = "qcom,usb-ssphy-qmp-dp-combo"; + reg = <0x88e8000 0x3000>; + reg-names = "qmp_phy_base"; + + vdd-supply = <&pm6150_l4>; + qcom,vdd-voltage-level = <0 880000 880000>; + core-supply = <&pm6150l_l3>; + qcom,vbus-valid-override; + qcom,qmp-phy-init-seq = + /* */ + <0x1048 0x07 0x00 /* COM_PLL_IVCO */ + 0x1080 0x14 0x00 /* COM_SYSCLK_EN_SEL */ + 0x1034 0x08 0x00 /* COM_BIAS_EN_CLKBUFLR_EN */ + 0x1138 0x30 0x00 /* COM_CLK_SELECT */ + 0x103c 0x02 0x00 /* 
COM_SYS_CLK_CTRL */ + 0x108c 0x08 0x00 /* COM_RESETSM_CNTRL2 */ + 0x115c 0x16 0x00 /* COM_CMN_CONFIG */ + 0x1164 0x01 0x00 /* COM_SVS_MODE_CLK_SEL */ + 0x113c 0x80 0x00 /* COM_HSCLK_SEL */ + 0x10b0 0x82 0x00 /* COM_DEC_START_MODE0 */ + 0x10b8 0xab 0x00 /* COM_DIV_FRAC_START1_MODE0 */ + 0x10bc 0xea 0x00 /* COM_DIV_FRAC_START2_MODE0 */ + 0x10c0 0x02 0x00 /* COM_DIV_FRAC_START3_MODE0 */ + 0x1060 0x06 0x00 /* COM_CP_CTRL_MODE0 */ + 0x1068 0x16 0x00 /* COM_PLL_RCTRL_MODE0 */ + 0x1070 0x36 0x00 /* COM_PLL_CCTRL_MODE0 */ + 0x10dc 0x00 0x00 /* COM_INTEGLOOP_GAIN1_MODE0 */ + 0x10d8 0x3f 0x00 /* COM_INTEGLOOP_GAIN0_MODE0 */ + 0x10f8 0x01 0x00 /* COM_VCO_TUNE2_MODE0 */ + 0x10f4 0xc9 0x00 /* COM_VCO_TUNE1_MODE0 */ + 0x1148 0x0a 0x00 /* COM_CORECLK_DIV_MODE0 */ + 0x10a0 0x00 0x00 /* COM_LOCK_CMP3_MODE0 */ + 0x109c 0x34 0x00 /* COM_LOCK_CMP2_MODE0 */ + 0x1098 0x15 0x00 /* COM_LOCK_CMP1_MODE0 */ + 0x1090 0x04 0x00 /* COM_LOCK_CMP_EN */ + 0x1154 0x00 0x00 /* COM_CORE_CLK_EN */ + 0x1094 0x00 0x00 /* COM_LOCK_CMP_CFG */ + 0x10f0 0x00 0x00 /* COM_VCO_TUNE_MAP */ + 0x1040 0x0a 0x00 /* COM_SYSCLK_BUF_ENABLE */ + 0x1010 0x01 0x00 /* COM_SSC_EN_CENTER */ + 0x101c 0x31 0x00 /* COM_SSC_PER1 */ + 0x1020 0x01 0x00 /* COM_SSC_PER2 */ + 0x1014 0x00 0x00 /* COM_SSC_ADJ_PER1 */ + 0x1018 0x00 0x00 /* COM_SSC_ADJ_PER2 */ + 0x1024 0x85 0x00 /* COM_SSC_STEP_SIZE1 */ + 0x1028 0x07 0x00 /* COM_SSC_STEP_SIZE2 */ + 0x1430 0x0b 0x00 /* RXA_UCDR_FASTLOCK_FO_GAIN */ + 0x14d4 0x0f 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL2 */ + 0x14d8 0x4e 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL3 */ + 0x14dc 0x18 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL4 */ + 0x14f8 0x77 0x00 /* RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */ + 0x14fc 0x80 0x00 /* RXA_RX_OFFSET_ADAPTOR_CNTRL2 */ + 0x1504 0x03 0x00 /* RXA_SIGDET_CNTRL */ + 0x150c 0x16 0x00 /* RXA_SIGDET_DEGLITCH_CNTRL */ + 0x1564 0x05 0x00 /* RXA_RX_MODE_00 */ + 0x14c0 0x03 0x00 /* RXA_VGA_CAL_CNTRL2 */ + 0x1830 0x0b 0x00 /* RXB_UCDR_FASTLOCK_FO_GAIN */ + 0x18d4 0x0f 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL2 */ + 
0x18d8 0x4e 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL3 */ + 0x18dc 0x18 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL4 */ + 0x18f8 0x77 0x00 /* RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */ + 0x18fc 0x80 0x00 /* RXB_RX_OFFSET_ADAPTOR_CNTRL2 */ + 0x1904 0x03 0x00 /* RXB_SIGDET_CNTRL */ + 0x190c 0x16 0x00 /* RXB_SIGDET_DEGLITCH_CNTRL */ + 0x1964 0x05 0x00 /* RXB_RX_MODE_00 */ + 0x18c0 0x03 0x00 /* RXB_VGA_CAL_CNTRL2 */ + 0x1260 0x10 0x00 /* TXA_HIGHZ_DRVR_EN */ + 0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */ + 0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */ + 0x1248 0x09 0x00 /* TXA_RES_CODE_LANE_OFFSET_RX */ + 0x1244 0x06 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */ + 0x1660 0x10 0x00 /* TXB_HIGHZ_DRVR_EN */ + 0x16a4 0x12 0x00 /* TXB_RCV_DETECT_LVL_2 */ + 0x168c 0x16 0x00 /* TXB_LANE_MODE_1 */ + 0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */ + 0x1644 0x06 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */ + 0x1cc8 0x83 0x00 /* PCS_FLL_CNTRL2 */ + 0x1ccc 0x09 0x00 /* PCS_FLL_CNT_VAL_L */ + 0x1cd0 0xa2 0x00 /* PCS_FLL_CNT_VAL_H_TOL */ + 0x1cd4 0x40 0x00 /* PCS_FLL_MAN_CODE */ + 0x1cc4 0x02 0x00 /* PCS_FLL_CNTRL1 */ + 0x1c80 0xd1 0x00 /* PCS_LOCK_DETECT_CONFIG1 */ + 0x1c84 0x1f 0x00 /* PCS_LOCK_DETECT_CONFIG2 */ + 0x1c88 0x47 0x00 /* PCS_LOCK_DETECT_CONFIG3 */ + 0x1c64 0x1b 0x00 /* PCS_POWER_STATE_CONFIG2 */ + 0x1434 0x75 0x00 /* RXA_UCDR_SO_SATURATION */ + 0x1834 0x75 0x00 /* RXB_UCDR_SO_SATURATION */ + 0x1dd8 0xba 0x00 /* PCS_RX_SIGDET_LVL */ + 0x1c0c 0x9f 0x00 /* PCS_TXMGN_V0 */ + 0x1c10 0x9f 0x00 /* PCS_TXMGN_V1 */ + 0x1c14 0xb7 0x00 /* PCS_TXMGN_V2 */ + 0x1c18 0x4e 0x00 /* PCS_TXMGN_V3 */ + 0x1c1c 0x65 0x00 /* PCS_TXMGN_V4 */ + 0x1c20 0x6b 0x00 /* PCS_TXMGN_LS */ + 0x1c24 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V0 */ + 0x1c28 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V0 */ + 0x1c2c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V1 */ + 0x1c30 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V1 */ + 0x1c34 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V2 */ + 0x1c38 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V2 */ + 0x1c3c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V3 */ + 0x1c40 0x1d 0x00 /* 
PCS_TXDEEMPH_M3P5DB_V3 */ + 0x1c44 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V4 */ + 0x1c48 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V4 */ + 0x1c4c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_LS */ + 0x1c50 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_LS */ + 0x1e0c 0x21 0x00 /* PCS_REFGEN_REQ_CONFIG1 */ + 0x1e10 0x60 0x00 /* PCS_REFGEN_REQ_CONFIG2 */ + 0x1c5c 0x02 0x00 /* PCS_RATE_SLEW_CNTRL */ + 0x1ca0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */ + 0x1c8c 0x44 0x00 /* PCS_TSYNC_RSYNC_TIME */ + 0x1c70 0xe7 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_L */ + 0x1c74 0x03 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_H */ + 0x1c78 0x40 0x00 /* PCS_RCVR_DTCT_DLY_U3_L */ + 0x1c7c 0x00 0x00 /* PCS_RCVR_DTCT_DLY_U3_H */ + 0x1cb8 0x75 0x00 /* PCS_RXEQTRAINING_WAIT_TIME */ + 0x1cb0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */ + 0x1cbc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */ + 0x1cac 0x04 0x00 /* PCS_LFPS_DET_HIGH_COUNT_VAL */ + 0xffffffff 0xffffffff 0x00>; + + qcom,qmp-phy-reg-offset = + <0x1d74 /* USB3_DP_PCS_PCS_STATUS */ + 0x1cd8 /* USB3_DP_PCS_AUTONOMOUS_MODE_CTRL */ + 0x1cdc /* USB3_DP_PCS_LFPS_RXTERM_IRQ_CLEAR */ + 0x1c04 /* USB3_DP_PCS_POWER_DOWN_CONTROL */ + 0x1c00 /* USB3_DP_PCS_SW_RESET */ + 0x1c08 /* USB3_DP_PCS_START_CONTROL */ + 0xffff /* USB3_PHY_PCS_MISC_TYPEC_CTRL */ + 0x2a18 /* USB3_DP_DP_PHY_PD_CTL */ + 0x0008 /* USB3_DP_COM_POWER_DOWN_CTRL */ + 0x0004 /* USB3_DP_COM_SW_RESET */ + 0x001c /* USB3_DP_COM_RESET_OVRD_CTRL */ + 0x0000 /* USB3_DP_COM_PHY_MODE_CTRL */ + 0x0010 /* USB3_DP_COM_TYPEC_CTRL */ + 0x000c /* USB3_DP_COM_SWI_CTRL */ + 0x1a0c>; /* USB3_DP_PCS_MISC_CLAMP_ENABLE */ + + clocks = <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>, + <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>, + <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, + <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + clock-names = "aux_clk", "pipe_clk", "ref_clk_src", + "ref_clk", "com_aux_clk", "cfg_ahb_clk"; + + resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>, + <&clock_gcc GCC_USB3_PHY_PRIM_BCR>; + 
reset-names = "global_phy_reset", "phy_reset"; + }; + + usb_audio_qmi_dev { + compatible = "qcom,usb-audio-qmi-dev"; + iommus = <&apps_smmu 0x1b2f 0x0>; + qcom,usb-audio-stream-id = <0xf>; + qcom,usb-audio-intr-num = <2>; + }; + + usb_nop_phy: usb_nop_phy { + compatible = "usb-nop-xceiv"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..c6d7615903fd0486191fa7f71a1ba3bda3f4c245 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-idp.dtsi" + +/ { + model = "USBC Audio IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,msm-id = <365 0x0>; + qcom,board-id = <34 2>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..5ebc8612ec0340f8c4338fcc56bf098b07a083ae --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-usbc-idp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" +#include "sdmmagpie-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE PM6150 USBC Audio IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,board-id = <34 2>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-vidc.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..9a76c596e93ca28aa00de47b7e72bb5a24504d23 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-vidc.dtsi @@ -0,0 +1,217 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +&soc { + msm_vidc0: qcom,vidc0 { + compatible = "qcom,msm-vidc", "qcom,sdmmagpie-vidc"; + status = "ok"; + sku-index = <0>; + reg = <0xaa00000 0x200000>; + interrupts = ; + + /* Supply */ + iris-ctl-supply = <&mvsc_gdsc>; + vcodec-supply = <&mvs0_gdsc>; + cvp-supply = <&mvs1_gdsc>; + + /* Clocks */ + clock-names = "video_cc_mvsc_ctl_axi", "video_cc_mvs0_ctl_axi", + "video_cc_mvs1_ctl_axi", "core_clk", "vcodec_clk", + "cvp_clk"; + clocks = <&clock_videocc VIDEO_CC_MVSC_CTL_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVS0_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVS1_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVSC_CORE_CLK>, + <&clock_videocc VIDEO_CC_MVS0_CORE_CLK>, + <&clock_videocc VIDEO_CC_MVS1_CORE_CLK>; + qcom,proxy-clock-names = "video_cc_mvsc_ctl_axi", + "video_cc_mvs0_ctl_axi", "video_cc_mvs1_ctl_axi", + "core_clk", "vcodec_clk", "cvp_clk"; + + qcom,clock-configs = <0x0 0x0 0x0 0x1 0x1 0x1>; + qcom,allowed-clock-rates = <240000000 338000000 + 365000000 444000000 533000000>; + + /* Buses */ + bus_cnoc { + compatible = "qcom,msm-vidc,bus"; + label = "cnoc"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = "performance"; + qcom,bus-range-kbps = <1000 1000>; + }; + + venus_bus_ddr { + compatible = "qcom,msm-vidc,bus"; + label = "venus-ddr"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = "msm-vidc-ddr"; + qcom,bus-range-kbps = <1000 6533000>; + }; + arm9_bus_ddr { + compatible = "qcom,msm-vidc,bus"; + label = "venus-arm9-ddr"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = "performance"; + qcom,bus-range-kbps = <1000 1000>; + }; + + /* MMUs */ + non_secure_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_ns"; + iommus = <&apps_smmu 0x2300 0x60>; + buffer-types = <0xfff>; + virtual-addr-pool = <0x25800000 0xba800000>; + }; + + secure_non_pixel_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_non_pixel"; + iommus = <&apps_smmu 0x2304 0x60>; + 
buffer-types = <0x480>; + virtual-addr-pool = <0x1000000 0x24800000>; + qcom,secure-context-bank; + }; + + secure_bitstream_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_bitstream"; + iommus = <&apps_smmu 0x2301 0x4>; + buffer-types = <0x241>; + virtual-addr-pool = <0x0 0xe0000000>; + qcom,secure-context-bank; + }; + + secure_pixel_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_pixel"; + iommus = <&apps_smmu 0x2303 0x20>; + buffer-types = <0x106>; + virtual-addr-pool = <0x0 0xe0000000>; + qcom,secure-context-bank; + }; + + /* Memory Heaps */ + qcom,msm-vidc,mem_cdsp { + compatible = "qcom,msm-vidc,mem-cdsp"; + memory-region = <&cdsp_mem>; + }; + }; + + msm_vidc1: qcom,vidc1 { + compatible = "qcom,msm-vidc", "qcom,sdmmagpie-vidc"; + status = "ok"; + sku-index = <1>; + reg = <0xaa00000 0x200000>; + interrupts = ; + + /* Supply */ + iris-ctl-supply = <&mvsc_gdsc>; + vcodec-supply = <&mvs0_gdsc>; + cvp-supply = <&mvs1_gdsc>; + + /* Clocks */ + clock-names = "gcc_video_axic", "gcc_video_axi0", + "gcc_video_axi1", "core_clk", "vcodec_clk", + "cvp_clk"; + clocks = <&clock_videocc VIDEO_CC_MVSC_CTL_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVS0_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVS1_AXI_CLK>, + <&clock_videocc VIDEO_CC_MVSC_CORE_CLK>, + <&clock_videocc VIDEO_CC_MVS0_CORE_CLK>, + <&clock_videocc VIDEO_CC_MVS1_CORE_CLK>; + qcom,proxy-clock-names = "gcc_video_axic", + "gcc_video_axi0", "gcc_video_axi1", + "core_clk", "vcodec_clk", "cvp_clk"; + + qcom,clock-configs = <0x0 0x0 0x0 0x1 0x1 0x1>; + qcom,allowed-clock-rates = <240000000 338000000 + 365000000 444000000 533000000>; + + /* Buses */ + bus_cnoc { + compatible = "qcom,msm-vidc,bus"; + label = "cnoc"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = "performance"; + qcom,bus-range-kbps = <1000 1000>; + }; + + venus_bus_ddr { + compatible = "qcom,msm-vidc,bus"; + label = "venus-ddr"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = 
"msm-vidc-ddr"; + qcom,bus-range-kbps = <1000 6533000>; + }; + arm9_bus_ddr { + compatible = "qcom,msm-vidc,bus"; + label = "venus-arm9-ddr"; + qcom,bus-master = ; + qcom,bus-slave = ; + qcom,bus-governor = "performance"; + qcom,bus-range-kbps = <1000 1000>; + }; + + /* MMUs */ + non_secure_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_ns"; + iommus = <&apps_smmu 0x2300 0x60>; + buffer-types = <0xfff>; + virtual-addr-pool = <0x25800000 0xba800000>; + }; + + secure_non_pixel_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_non_pixel"; + iommus = <&apps_smmu 0x2304 0x60>; + buffer-types = <0x480>; + virtual-addr-pool = <0x1000000 0x24800000>; + qcom,secure-context-bank; + }; + + secure_bitstream_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_bitstream"; + iommus = <&apps_smmu 0x2301 0x4>; + buffer-types = <0x241>; + virtual-addr-pool = <0x0 0xe0000000>; + qcom,secure-context-bank; + }; + + secure_pixel_cb { + compatible = "qcom,msm-vidc,context-bank"; + label = "venus_sec_pixel"; + iommus = <&apps_smmu 0x2303 0x20>; + buffer-types = <0x106>; + virtual-addr-pool = <0x0 0xe0000000>; + qcom,secure-context-bank; + }; + + /* Memory Heaps */ + qcom,msm-vidc,mem_cdsp { + compatible = "qcom,msm-vidc,mem-cdsp"; + memory-region = <&cdsp_mem>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dts b/arch/arm64/boot/dts/qcom/sdmmagpie.dts index 504b85f5c3e184438af7517749637735294e0f9d..780dbe2503d454a29606f00a34494bc85112c825 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dts +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dts @@ -17,5 +17,6 @@ / { model = "Qualcomm Technologies, Inc. 
SDMMAGPIE SoC"; compatible = "qcom,sdmmagpie"; + qcom,pmic-name = "PM6150"; qcom,board-id = <0 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index f796e71571d4309df54c2c64cb5d5ce8cbfbc561..76da2075d1b19dbb40e1ec626b4916670f78e5ac 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -22,11 +22,17 @@ #include #include #include +#include +#include + +#define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024)) +#define BW_OPP_ENTRY(mhz, w) opp-mhz {opp-hz = /bits/ 64 ;} / { model = "Qualcomm Technologies, Inc. SDMMAGPIE"; compatible = "qcom,sdmmagpie"; qcom,msm-id = <365 0x0>; + qcom,msm-name = "SDMMAGPIE"; interrupt-parent = <&pdc>; aliases { @@ -51,8 +57,12 @@ compatible = "arm,armv8"; reg = <0x0 0x0>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_0>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_0: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -86,8 +96,12 @@ compatible = "arm,armv8"; reg = <0x0 0x100>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_100>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_100: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -116,8 +130,12 @@ compatible = "arm,armv8"; reg = <0x0 0x200>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_200>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_200: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -145,8 +163,12 @@ compatible = "arm,armv8"; reg = <0x0 0x300>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; 
cache-size = <0x8000>; next-level-cache = <&L2_300>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_300: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -174,8 +196,12 @@ compatible = "arm,armv8"; reg = <0x0 0x400>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_400>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_400: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -203,8 +229,12 @@ compatible = "arm,armv8"; reg = <0x0 0x500>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; cache-size = <0x8000>; next-level-cache = <&L2_500>; + qcom,lmh-dcvs = <&lmh_dcvs0>; + #cooling-cells = <2>; L2_500: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x20000>; @@ -232,8 +262,12 @@ compatible = "arm,armv8"; reg = <0x0 0x600>; enable-method = "psci"; + capacity-dmips-mhz = <1740>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; cache-size = <0x10000>; next-level-cache = <&L2_600>; + qcom,lmh-dcvs = <&lmh_dcvs1>; + #cooling-cells = <2>; L2_600: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x40000>; @@ -270,8 +304,12 @@ compatible = "arm,armv8"; reg = <0x0 0x700>; enable-method = "psci"; + capacity-dmips-mhz = <1740>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; cache-size = <0x10000>; next-level-cache = <&L2_700>; + qcom,lmh-dcvs = <&lmh_dcvs1>; + #cooling-cells = <2>; L2_700: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x40000>; @@ -343,6 +381,90 @@ }; }; + energy_costs: energy-costs { + compatible = "sched-energy"; + + CPU_COST_0: core-cost0 { + busy-cost-data = < + 300000 10 + 576000 18 + 768000 23 + 1017600 36 + 1248000 52 + 1324800 67 + 1497600 76 + 1612800 92 + 1708800 113 + 1804800 119 + >; + idle-cost-data = < + 16 12 8 6 + >; + }; + + CPU_COST_1: core-cost1 { + busy-cost-data = < + 300000 166 + 
652800 242 + 806400 293 + 979200 424 + 1094400 470 + 1209600 621 + 1324800 676 + 1555200 973 + 1708800 1060 + 1843800 1298 + 1939200 1362 + 2169600 1801 + 2361600 2326 + 2438400 2568 + >; + idle-cost-data = < + 100 80 60 40 + >; + }; + + CLUSTER_COST_0: cluster-cost0 { + busy-cost-data = < + 300000 5 + 576000 5 + 768000 5 + 1017600 7 + 1248000 8 + 1324800 10 + 1497600 10 + 1612800 12 + 1708800 14 + 1804800 14 + >; + idle-cost-data = < + 4 3 2 1 + >; + }; + + CLUSTER_COST_1: cluster-cost1 { + busy-cost-data = < + 300000 19 + 652800 21 + 806400 21 + 979200 25 + 1094400 26 + 1209600 32 + 1324800 33 + 1555200 41 + 1708800 43 + 1843800 49 + 1939200 50 + 2169600 60 + 2361600 62 + 2438400 63 + >; + idle-cost-data = < + 4 3 2 1 + >; + }; + }; + psci { compatible = "arm,psci-1.0"; method = "smc"; @@ -357,6 +479,10 @@ firmware: firmware { android { compatible = "android,firmware"; + vbmeta { + compatible = "android,vbmeta"; + parts = "vbmeta,boot,system,vendor,dtbo"; + }; fstab { compatible = "android,fstab"; vendor { @@ -480,6 +606,14 @@ size = <0 0xc00000>; }; + cdsp_mem: cdsp_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; + reusable; + alignment = <0x0 0x400000>; + size = <0x0 0x400000>; + }; + qseecom_ta_mem: qseecom_ta_region { compatible = "shared-dma-pool"; alloc-ranges = <0 0x00000000 0 0xffffffff>; @@ -504,8 +638,8 @@ size = <0 0x5c00000>; }; - cont_splash_memory: cont_splash_region@9d400000 { - reg = <0x0 0x9d400000 0x0 0x02400000>; + cont_splash_memory: cont_splash_region@9c000000 { + reg = <0x0 0x9c000000 0x0 0x02400000>; label = "cont_splash_region"; }; @@ -553,6 +687,33 @@ interrupt-controller; }; + qcom,memshare { + compatible = "qcom,memshare"; + + qcom,client_1 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + qcom,client-id = <0>; + qcom,allocate-boot-time; + label = "modem"; + }; + + qcom,client_2 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + 
qcom,client-id = <2>; + label = "modem"; + }; + + mem_client_3_size: qcom,client_3 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x500000>; + qcom,client-id = <1>; + qcom,allocate-on-request; + label = "modem"; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts = , @@ -775,6 +936,11 @@ reg = <0x65c 4>; }; + dload_type@1c { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x1c 0x4>; + }; + boot_stats@6b0 { compatible = "qcom,msm-imem-boot_stats"; reg = <0x6b0 32>; @@ -789,6 +955,11 @@ compatible = "qcom,msm-imem-pil"; reg = <0x94c 200>; }; + + diag_dload@c8 { + compatible = "qcom,msm-imem-diag-dload"; + reg = <0xc8 200>; + }; }; restart@c264000 { @@ -891,6 +1062,8 @@ qcom,pet-time = <9360>; qcom,ipi-ping; qcom,wakeup-enable; + qcom,scandump-sizes = <0x10100 0x10100 0x10100 0x10100 + 0x10100 0x10100 0x25900 0x25900>; }; eud: qcom,msm-eud@88e0000 { @@ -1099,6 +1272,61 @@ }; }; + mem_dump { + compatible = "qcom,mem-dump"; + memory-region = <&dump_mem>; + + rpmh { + qcom,dump-size = <0x2000000>; + qcom,dump-id = <0xec>; + }; + + rpm_sw { + qcom,dump-size = <0x28000>; + qcom,dump-id = <0xea>; + }; + + pmic { + qcom,dump-size = <0x10000>; + qcom,dump-id = <0xe4>; + }; + + fcm { + qcom,dump-size = <0x8400>; + qcom,dump-id = <0xee>; + }; + + tmc_etf { + qcom,dump-size = <0x10000>; + qcom,dump-id = <0xf0>; + }; + + etf_swao { + qcom,dump-size = <0x8400>; + qcom,dump-id = <0xf1>; + }; + + etr_reg { + qcom,dump-size = <0x1000>; + qcom,dump-id = <0x100>; + }; + + etf_reg { + qcom,dump-size = <0x1000>; + qcom,dump-id = <0x101>; + }; + + etfswao_reg { + qcom,dump-size = <0x1000>; + qcom,dump-id = <0x102>; + }; + + misc_data { + qcom,dump-size = <0x1000>; + qcom,dump-id = <0xe8>; + }; + }; + thermal_zones: thermal-zones {}; tsens0: tsens@c222000 { @@ -1148,6 +1376,8 @@ qcom,llcc-perfmon { compatible = "qcom,llcc-perfmon"; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "qdss_clk"; }; qcom,llcc-erp { @@ -1477,6 +1707,21 @@ 
qcom,glink-channels = "g_glink_audio_data"; qcom,intents = <0x1000 2>; }; + + qcom,diag_data { + qcom,glink-channels = "DIAG_DATA"; + qcom,intents = <0x4000 2>; + }; + + qcom,diag_ctrl { + qcom,glink-channels = "DIAG_CTRL"; + qcom,intents = <0x4000 1>; + }; + + qcom,diag_cmd { + qcom,glink-channels = "DIAG_CMD"; + qcom,intents = <0x4000 1 >; + }; }; }; @@ -1583,6 +1828,18 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_ipa_1_out: qcom,smp2p-ipa-1-out { + qcom,entry-name = "ipa"; + #qcom,smem-state-cells = <1>; + }; + + /* ipa - inbound entry from mss */ + smp2p_ipa_1_in: qcom,smp2p-ipa-1-in { + qcom,entry-name = "ipa"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; qcom,smp2p-adsp { @@ -1682,6 +1939,44 @@ status = "disabled"; }; + qcom_seecom: qseecom@86d00000 { + compatible = "qcom,qseecom"; + reg = <0x86d00000 0xe00000>; + reg-names = "secapp-region"; + memory-region = <&qseecom_mem>; + qcom,hlos-num-ce-hw-instances = <1>; + qcom,hlos-ce-hw-instance = <0>; + qcom,qsee-ce-hw-instance = <0>; + qcom,disk-encrypt-pipe-pair = <2>; + qcom,support-fde; + qcom,no-clock-support; + qcom,fde-key-size; + qcom,appsbl-qseecom-support; + qcom,commonlib64-loaded-by-uefi; + qcom,qsee-reentrancy-support = <2>; + }; + + qcom_smcinvoke: smcinvoke@86d00000 { + compatible = "qcom,smcinvoke"; + reg = <0x86d00000 0xe00000>; + reg-names = "secapp-region"; + }; + + qcom_rng: qrng@793000 { + compatible = "qcom,msm-rng"; + reg = <0x793000 0x1000>; + qcom,msm-rng-iface-clk; + qcom,no-qrng-config; + qcom,msm-bus,name = "msm-rng-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 618 0 0>, /* No vote */ + <1 618 0 300000>; /* 75 MHz */ + clocks = <&clock_gcc GCC_PRNG_AHB_CLK>; + clock-names = "iface_clk"; + }; + ufsphy_mem: ufsphy_mem@1d87000 { reg = <0x1d87000 0xddc>; /* PHY regs */ reg-names = "phy_mem"; @@ -1808,6 +2103,8 @@ qcom,ssctl-instance-id = <0x14>; qcom,firmware-name = "adsp"; memory-region = 
<&pil_adsp_mem>; + qcom,signal-aop; + qcom,complete-ramdump; /* Inputs from lpass */ interrupts-extended = <&pdc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>, @@ -1838,6 +2135,67 @@ qcom,guard-memory; }; + qcom_cedev: qcedev@1de0000 { + compatible = "qcom,qcedev"; + reg = <0x1de0000 0x20000>, + <0x1dc4000 0x24000>; + reg-names = "crypto-base","crypto-bam-base"; + interrupts = <0 272 0>; + qcom,bam-pipe-pair = <3>; + qcom,ce-hw-instance = <0>; + qcom,ce-device = <0>; + qcom,ce-hw-shared; + qcom,bam-ee = <0>; + qcom,msm-bus,name = "qcedev-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <125 512 0 0>, + <125 512 393600 393600>; + qcom,smmu-s1-enable; + qcom,no-clock-support; + iommus = <&apps_smmu 0x0106 0x0011>, + <&apps_smmu 0x0116 0x0011>; + }; + + qcom_crypto: qcrypto@1de0000 { + compatible = "qcom,qcrypto"; + reg = <0x1de0000 0x20000>, + <0x1dc4000 0x24000>; + reg-names = "crypto-base","crypto-bam-base"; + interrupts = <0 272 0>; + qcom,bam-pipe-pair = <2>; + qcom,ce-hw-instance = <0>; + qcom,ce-device = <0>; + qcom,bam-ee = <0>; + qcom,ce-hw-shared; + qcom,clk-mgmt-sus-res; + qcom,msm-bus,name = "qcrypto-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <125 512 0 0>, + <125 512 393600 393600>; + qcom,use-sw-aes-cbc-ecb-ctr-algo; + qcom,use-sw-aes-xts-algo; + qcom,use-sw-aes-ccm-algo; + qcom,use-sw-ahash-algo; + qcom,use-sw-aead-algo; + qcom,use-sw-hmac-algo; + qcom,smmu-s1-enable; + qcom,no-clock-support; + iommus = <&apps_smmu 0x0104 0x0011>, + <&apps_smmu 0x0114 0x0011>; + }; + + qcom_tzlog: tz-log@146aa720 { + compatible = "qcom,tz-log"; + reg = <0x146aa720 0x3000>; + qcom,hyplog-enabled; + hyplog-address-offset = <0x410>; + hyplog-size-offset = <0x414>; + }; + spmi_bus: qcom,spmi@c440000 { compatible = "qcom,spmi-pmic-arb"; reg = <0xc440000 0x1100>, @@ -1878,6 +2236,8 @@ qcom,ssctl-instance-id = <0x17>; qcom,firmware-name = "cdsp"; memory-region = <&pil_cdsp_mem>; 
+ qcom,signal-aop; + qcom,complete-ramdump; /* Inputs from turing */ interrupts-extended = <&pdc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>, @@ -1899,12 +2259,234 @@ mboxes = <&qmp_aop 0>; mbox-names = "cdsp-pil"; }; + + pil_modem: qcom,mss@4080000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x4080000 0x100>; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + vdd_cx-supply = <&VDD_CX_LEVEL>; + qcom,vdd_cx-uV-uA = ; + vdd_mss-supply = <&VDD_MSS_LEVEL>; + qcom,vdd_mss-uV-uA = ; + qcom,proxy-reg-names = "vdd_cx", "vdd_mss"; + + qcom,firmware-name = "modem"; + memory-region = <&pil_modem_mem>; + qcom,proxy-timeout-ms = <10000>; + qcom,sysmon-id = <0>; + qcom,ssctl-instance-id = <0x12>; + qcom,pas-id = <4>; + qcom,smem-id = <421>; + qcom,signal-aop; + qcom,complete-ramdump; + + /* Inputs from mss */ + interrupts-extended = <&pdc GIC_SPI 266 IRQ_TYPE_EDGE_RISING>, + <&modem_smp2p_in 0 IRQ_TYPE_NONE>, + <&modem_smp2p_in 1 IRQ_TYPE_NONE>, + <&modem_smp2p_in 2 IRQ_TYPE_NONE>, + <&modem_smp2p_in 3 IRQ_TYPE_NONE>, + <&modem_smp2p_in 7 IRQ_TYPE_NONE>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,err-ready", + "qcom,proxy-unvote", + "qcom,stop-ack", + "qcom,shutdown-ack"; + + /* Outputs to mss */ + qcom,smem-states = <&modem_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; + + mboxes = <&qmp_aop 0>; + mbox-names = "mss-pil"; + }; + + qcom,venus@aae0000 { + compatible = "qcom,pil-tz-generic"; + reg = <0xaae0000 0x4000>; + + vdd-supply = <&mvsc_gdsc>; + qcom,proxy-reg-names = "vdd"; + + clocks = <&clock_videocc VIDEO_CC_XO_CLK>, + <&clock_videocc VIDEO_CC_MVSC_CORE_CLK>, + <&clock_videocc VIDEO_CC_IRIS_AHB_CLK>; + clock-names = "xo", "core", "ahb"; + qcom,proxy-clock-names = "xo", "core", "ahb"; + + qcom,core-freq = <200000000>; + qcom,ahb-freq = <200000000>; + + qcom,pas-id = <9>; + qcom,msm-bus,name = "pil-venus"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = 
<63 512 0 0>, + <63 512 0 304000>; + qcom,proxy-timeout-ms = <100>; + qcom,firmware-name = "venus"; + memory-region = <&pil_video_mem>; + }; + + qcom,npu@0x9800000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x9800000 0x800000>; + + status = "ok"; + qcom,pas-id = <23>; + qcom,firmware-name = "npu"; + memory-region = <&npu_mem>; + }; + + icnss: qcom,icnss@18800000 { + status = "disabled"; + compatible = "qcom,icnss"; + reg = <0x18800000 0x800000>; + reg-names = "membase"; + interrupts = <0 414 0 /* CE0 */ >, + <0 415 0 /* CE1 */ >, + <0 416 0 /* CE2 */ >, + <0 417 0 /* CE3 */ >, + <0 418 0 /* CE4 */ >, + <0 419 0 /* CE5 */ >, + <0 420 0 /* CE6 */ >, + <0 421 0 /* CE7 */ >, + <0 422 0 /* CE8 */ >, + <0 423 0 /* CE9 */ >, + <0 424 0 /* CE10 */ >, + <0 425 0 /* CE11 */ >; + qcom,smmu-s1-bypass; + qcom,wlan-msa-memory = <0x100000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; + }; + + qcom,msm_gsi { + compatible = "qcom,msm_gsi"; + }; + + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa3"; + qcom,rmnet-ipa-ssr; + qcom,ipa-platform-type-msm; + qcom,ipa-advertise-sg-support; + qcom,ipa-napi-enable; + }; + + ipa_hw: qcom,ipa@1e00000 { + compatible = "qcom,ipa"; + reg = <0x1e00000 0x34000>, + <0x1e04000 0x2c000>; + reg-names = "ipa-base", "gsi-base"; + interrupts = <0 311 0>, <0 432 0>; + interrupt-names = "ipa-irq", "gsi-irq"; + qcom,ipa-hw-ver = <16>; /* IPA core version = IPAv4.2 */ + qcom,ipa-hw-mode = <0>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,modem-cfg-emb-pipe-flt; + qcom,ipa-wdi2; + qcom,ipa-wdi2_over_gsi; + qcom,ipa-fltrt-not-hashable; + qcom,use-64-bit-dma-mask; + qcom,arm-smmu; + qcom,smmu-fast-map; + qcom,use-ipa-pm; + qcom,bandwidth-vote-for-ipa; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <5>; + qcom,msm-bus,num-paths = <4>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + , + , + , + , + /* SVS2 */ + , + , + , + , + /* SVS */ + , + , + , + , + /* NOMINAL */ + , + , + , + , + /* TURBO */ + , + , + , + ; + 
qcom,bus-vector-names = + "MIN", "SVS2", "SVS", "NOMINAL", "TURBO"; + qcom,throughput-threshold = <310 600 1000>; + qcom,scaling-exceptions = <>; + + /* smp2p information */ + qcom,smp2p_map_ipa_1_out { + compatible = "qcom,smp2p-map-ipa-1-out"; + qcom,smem-states = <&smp2p_ipa_1_out 0>; + qcom,smem-state-names = "ipa-smp2p-out"; + }; + + qcom,smp2p_map_ipa_1_in { + compatible = "qcom,smp2p-map-ipa-1-in"; + interrupts-extended = <&smp2p_ipa_1_in 0 0>; + interrupt-names = "ipa-smp2p-in"; + }; + }; + + ipa_smmu_ap: ipa_smmu_ap { + compatible = "qcom,ipa-smmu-ap-cb"; + qcom,smmu-s1-bypass; + iommus = <&apps_smmu 0x520 0x0>; + qcom,iova-mapping = <0x20000000 0x40000000>; + /* modem tables in IMEM */ + qcom,additional-mapping = <0x146bd000 0x146bd000 0x2000>; + }; + + ipa_smmu_wlan: ipa_smmu_wlan { + compatible = "qcom,ipa-smmu-wlan-cb"; + qcom,smmu-s1-bypass; + iommus = <&apps_smmu 0x521 0x0>; + /* ipa-uc ram */ + qcom,additional-mapping = <0x1e60000 0x1e60000 0x80000>; + }; + + ipa_smmu_uc: ipa_smmu_uc { + compatible = "qcom,ipa-smmu-uc-cb"; + qcom,smmu-s1-bypass; + iommus = <&apps_smmu 0x522 0x0>; + qcom,iova-mapping = <0x40400000 0x1fc00000>; + }; + + qcom,ipa_fws { + compatible = "qcom,pil-tz-generic"; + qcom,pas-id = <0xf>; + qcom,firmware-name = "ipa_fws"; + qcom,pil-force-shutdown; + memory-region = <&pil_ipa_fw_mem>; + }; + }; #include "sdmmagpie-pinctrl.dtsi" #include "sdmmagpie-gdsc.dtsi" #include "sdmmagpie-bus.dtsi" #include "sdmmagpie-qupv3.dtsi" +#include "sdmmagpie-vidc.dtsi" +#include "sdmmagpie-sde-pll.dtsi" +#include "sdmmagpie-sde.dtsi" &pcie_0_gdsc { status = "ok"; @@ -2023,9 +2605,16 @@ #include "sdmmagpie-pm.dtsi" #include "pm6150.dtsi" #include "pm6150l.dtsi" +#include "pm8009.dtsi" #include "sdmmagpie-regulator.dtsi" #include "sdmmagpie-coresight.dtsi" +#include "sdmmagpie-usb.dtsi" #include "sdmmagpie-thermal.dtsi" +#include "sdmmagpie-audio.dtsi" + +&usb0 { + extcon = <&pm6150_pdphy>, <&pm6150_charger>, <&eud>; +}; &pm6150_vadc { rf_pa0_therm 
{ @@ -2051,6 +2640,281 @@ qcom,hw-settle-time = <200>; qcom,pre-scaling = <1 1>; }; + + llcc_pmu: llcc-pmu@90cc000 { + compatible = "qcom,qcom-llcc-pmu"; + reg = <0x090cc000 0x300>; + reg-names = "lagg-base"; + }; + + llcc_bw_opp_table: llcc-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 300, 16); /* 4577 MB/s */ + BW_OPP_ENTRY( 466, 16); /* 7110 MB/s */ + BW_OPP_ENTRY( 600, 16); /* 9155 MB/s */ + BW_OPP_ENTRY( 806, 16); /* 12298 MB/s */ + BW_OPP_ENTRY( 933, 16); /* 14236 MB/s */ + }; + + cpu_cpu_llcc_bw: qcom,cpu-cpu-llcc-bw { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&llcc_bw_opp_table>; + }; + + cpu_cpu_llcc_bwmon: qcom,cpu-cpu-llcc-bwmon@90b6300 { + compatible = "qcom,bimc-bwmon4"; + reg = <0x90b6300 0x300>, <0x90b6200 0x200>; + reg-names = "base", "global_base"; + interrupts = ; + qcom,mport = <0>; + qcom,hw-timer-hz = <19200000>; + qcom,target-dev = <&cpu_cpu_llcc_bw>; + qcom,count-unit = <0x10000>; + }; + + ddr_bw_opp_table: ddr-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ + BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ + BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ + BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ + BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ + BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ + BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ + BW_OPP_ENTRY(1353, 4); /* 5161 MB/s */ + BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ + BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ + }; + + cpu_llcc_ddr_bw: qcom,cpu-llcc-ddr-bw { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu_llcc_ddr_bwmon: qcom,cpu-llcc-ddr-bwmon@90cd000 { + compatible = "qcom,bimc-bwmon5"; + reg = <0x90cd000 0x1000>; + reg-names = "base"; + interrupts = ; + qcom,hw-timer-hz = <19200000>; + qcom,target-dev = <&cpu_llcc_ddr_bw>; + qcom,count-unit = <0x10000>; + }; + + 
suspendable_ddr_bw_opp_table: suspendable-ddr-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 0, 4); /* 0 MB/s */ + BW_OPP_ENTRY( 200, 4); /* 762 MB/s */ + BW_OPP_ENTRY( 300, 4); /* 1144 MB/s */ + BW_OPP_ENTRY( 451, 4); /* 1720 MB/s */ + BW_OPP_ENTRY( 547, 4); /* 2086 MB/s */ + BW_OPP_ENTRY( 681, 4); /* 2597 MB/s */ + BW_OPP_ENTRY( 768, 4); /* 2929 MB/s */ + BW_OPP_ENTRY(1017, 4); /* 3879 MB/s */ + BW_OPP_ENTRY(1353, 4); /* 5161 MB/s */ + BW_OPP_ENTRY(1555, 4); /* 5931 MB/s */ + BW_OPP_ENTRY(1804, 4); /* 6881 MB/s */ + }; + + cdsp_cdsp_l3_lat: qcom,cdsp-cdsp-l3-lat { + compatible = "devfreq-simple-dev"; + clock-names = "devfreq_clk"; + clocks = <&clock_cpucc L3_MISC_VOTE_CLK>; + governor = "powersave"; + }; + + cpu0_cpu_l3_lat: qcom,cpu0-cpu-l3-lat { + compatible = "devfreq-simple-dev"; + clock-names = "devfreq_clk"; + clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>; + governor = "performance"; + }; + + cpu0_cpu_l3_latmon: qcom,cpu0-cpu-l3-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; + qcom,target-dev = <&cpu0_cpu_l3_lat>; + qcom,cachemiss-ev = <0x17>; + qcom,core-dev-table = + < 768000 300000000 >, + < 1017600 566800000 >, + < 1248000 768000000 >, + < 1497000 940800000 >, + < 1804800 1459200000 >; + }; + + cpu6_cpu_l3_lat: qcom,cpu6-cpu-l3-lat { + compatible = "devfreq-simple-dev"; + clock-names = "devfreq_clk"; + clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>; + governor = "performance"; + }; + + cpu6_cpu_l3_latmon: qcom,cpu6-cpu-l3-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU6 &CPU7>; + qcom,target-dev = <&cpu6_cpu_l3_lat>; + qcom,cachemiss-ev = <0x17>; + qcom,core-dev-table = + < 1094000 566800000 >, + < 1324000 768000000 >, + < 1708800 1190800000 >, + < 1939000 1382000000 >, + < 2438400 1459200000 >; + }; + + cpu0_cpu_llcc_lat: qcom,cpu0-cpu-llcc-lat { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + 
operating-points-v2 = <&llcc_bw_opp_table>; + }; + + cpu0_cpu_llcc_latmon: qcom,cpu0-cpu-llcc-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; + qcom,target-dev = <&cpu0_cpu_llcc_lat>; + qcom,cachemiss-ev = <0x2A>; + qcom,core-dev-table = + < 1324000 MHZ_TO_MBPS(300, 16) >, + < 1497000 MHZ_TO_MBPS(466, 16) >, + < 1804800 MHZ_TO_MBPS(600, 16) >; + }; + + cpu6_cpu_llcc_lat: qcom,cpu6-cpu-llcc-lat { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&llcc_bw_opp_table>; + }; + + cpu6_cpu_llcc_latmon: qcom,cpu6-cpu-llcc-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU6 &CPU7>; + qcom,target-dev = <&cpu6_cpu_llcc_lat>; + qcom,cachemiss-ev = <0x2A>; + qcom,core-dev-table = + < 806000 MHZ_TO_MBPS(300, 16) >, + < 1094000 MHZ_TO_MBPS(466, 16) >, + < 1324000 MHZ_TO_MBPS(600, 16) >, + < 1708800 MHZ_TO_MBPS(806, 16) >, + < 2438400 MHZ_TO_MBPS(933, 16) >; + }; + + cpu0_llcc_ddr_lat: qcom,cpu0-llcc-ddr-lat { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu0_llcc_ddr_latmon: qcom,cpu0-llcc-ddr-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; + qcom,target-dev = <&cpu0_llcc_ddr_lat>; + qcom,cachemiss-ev = <0x1000>; + qcom,core-dev-table = + < 768000 MHZ_TO_MBPS( 300, 4) >, + < 1017600 MHZ_TO_MBPS( 451, 4) >, + < 1248000 MHZ_TO_MBPS( 547, 4) >, + < 1497000 MHZ_TO_MBPS( 768, 4) >, + < 1804800 MHZ_TO_MBPS(1017, 4) >; + }; + + cpu6_llcc_ddr_lat: qcom,cpu6-llcc-ddr-lat { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu6_llcc_ddr_latmon: qcom,cpu6-llcc-ddr-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU6 &CPU7>; + qcom,target-dev = 
<&cpu6_llcc_ddr_lat>; + qcom,cachemiss-ev = <0x1000>; + qcom,core-dev-table = + < 806000 MHZ_TO_MBPS( 451, 4) >, + < 1094000 MHZ_TO_MBPS( 547, 4) >, + < 1324000 MHZ_TO_MBPS(1017, 4) >, + < 1708800 MHZ_TO_MBPS(1555, 4) >, + < 2438400 MHZ_TO_MBPS(1804, 4) >; + }; + + cpu0_cpu_ddr_latfloor: qcom,cpu0-cpu-ddr-latfloor { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu0_computemon: qcom,cpu0-computemon { + compatible = "qcom,arm-cpu-mon"; + qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; + qcom,target-dev = <&cpu0_cpu_ddr_latfloor>; + qcom,core-dev-table = + < 768000 MHZ_TO_MBPS( 300, 4) >, + < 1248000 MHZ_TO_MBPS( 451, 4) >, + < 1497000 MHZ_TO_MBPS( 547, 4) >, + < 1804800 MHZ_TO_MBPS( 768,4) >; + }; + + cpu6_cpu_ddr_latfloor: qcom,cpu6-cpu-ddr-latfloor { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = + ; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu6_computemon: qcom,cpu6-computemon { + compatible = "qcom,arm-cpu-mon"; + qcom,cpulist = <&CPU6 &CPU7>; + qcom,target-dev = <&cpu6_cpu_ddr_latfloor>; + qcom,core-dev-table = + < 1094000 MHZ_TO_MBPS( 300, 4) >, + < 1324000 MHZ_TO_MBPS( 547, 4) >, + < 1552200 MHZ_TO_MBPS( 768, 4) >, + < 1708000 MHZ_TO_MBPS(1017, 4) >, + < 2438400 MHZ_TO_MBPS(1804, 4) >; + }; + + npu_npu_ddr_bw: qcom,npu-npu-ddr-bw { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = ; + operating-points-v2 = <&suspendable_ddr_bw_opp_table>; + }; + + npu_npu_ddr_bwmon: qcom,npu-npu-ddr-bwmon@9960300 { + compatible = "qcom,bimc-bwmon4"; + reg = <0x9960300 0x300>, <0x9960200 0x200>; + reg-names = "base", "global_base"; + interrupts = ; + qcom,mport = <0>; + qcom,hw-timer-hz = <19200000>; + qcom,target-dev = <&npu_npu_ddr_bw>; + qcom,count-unit = <0x10000>; + }; }; &pm6150_adc_tm { @@ -2086,6 +2950,9 @@ }; &pm6150l_vadc { + pinctrl-names = "default"; + 
pinctrl-0 = <&nvm_therm_default>; + conn_therm { reg = ; label = "conn_therm"; @@ -2118,6 +2985,15 @@ }; }; +&pm6150l_gpios { + nvm_therm { + nvm_therm_default: nvm_therm_default { + pins = "gpio10"; + bias-high-impedance; + }; + }; +}; + &pm6150l_adc_tm { io-channels = <&pm6150l_vadc ADC_AMUX_THM1_PU2>, <&pm6150l_vadc ADC_AMUX_THM3_PU2>, @@ -2142,3 +3018,5 @@ qcom,hw-settle-time = <200>; }; }; + +#include "sdmmagpie-npu.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpiep-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..bb5ab013bb816098a619368f4acc4bec1ee3536c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep-idp-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-idp.dtsi" + +/ { + model = "IDP"; + compatible = "qcom,sdmmagpiep-idp", "qcom,sdmmagpiep", "qcom,idp"; + qcom,msm-id = <366 0x0>; + qcom,board-id = <34 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep-idp.dts b/arch/arm64/boot/dts/qcom/sdmmagpiep-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..30bdd6eb4516f04a122f40b47f9979dacf75f770 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep-idp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpiep.dtsi" +#include "sdmmagpie-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIEP PM6150 IDP"; + compatible = "qcom,sdmmagpiep-idp", "qcom,sdmmagpiep", "qcom,idp"; + qcom,board-id = <34 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..2172498a631fa4bdc6ad30919be3ba99410680e7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-qrd.dtsi" + +/ { + model = "QRD"; + compatible = "qcom,sdmmagpiep-qrd", "qcom,sdmmagpiep", "qcom,qrd"; + qcom,msm-id = <366 0>; + qcom,board-id = <11 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd.dts b/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd.dts new file mode 100644 index 0000000000000000000000000000000000000000..77df22dedd4f977e9d02a4297238ad7b5762bb2b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep-qrd.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpiep.dtsi" +#include "sdmmagpie-qrd.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIEP PM6150 QRD"; + compatible = "qcom,sdmmagpiep-qrd", "qcom,sdmmagpiep", "qcom,qrd"; + qcom,board-id = <11 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep.dts b/arch/arm64/boot/dts/qcom/sdmmagpiep.dts new file mode 100644 index 0000000000000000000000000000000000000000..9f63a0faaf57ab61fecb30e095073d4a4ea32d7f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpiep.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIEP SoC"; + compatible = "qcom,sdmmagpiep"; + qcom,pmic-name = "PM6150"; + qcom,board-id = <0 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpiep.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpiep.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..491e6ebd9af7fcb0682c73eac1a3f84e1c9d3705 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpiep.dtsi @@ -0,0 +1,19 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sdmmagpie.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIEP"; + qcom,msm-name = "SDMMAGPIEP"; + qcom,msm-id = <366 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-bus.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-bus.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..71a302195055ae89b0cc647c54302f1a0c058362 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-bus.dtsi @@ -0,0 +1,972 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +&soc { + ad_hoc_bus: ad-hoc-bus { + compatible = "qcom,msm-bus-device"; + reg = <0x1100000 0x400000>, + <0x1100000 0x400000>, + <0x1620000 0x400000>, + <0x1620000 0x400000>; + + reg-names = "mc_virt-base", "mem_noc-base", + "system_noc-base", "ipa_virt-base"; + + mbox-names = "apps_rsc"; + + /*RSCs*/ + rsc_apps: rsc-apps { + cell-id = ; + label = "apps_rsc"; + qcom,rsc-dev; + qcom,req-state = <2>; + }; + + /*BCMs*/ + bcm_mc0: bcm-mc0 { + cell-id = ; + label = "MC0"; + qcom,bcm-name = "MC0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh0: bcm-sh0 { + cell-id = ; + label = "SH0"; + qcom,bcm-name = "SH0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ce0: bcm-ce0 { + cell-id = ; + label = "CE0"; + qcom,bcm-name = "CE0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ip0: bcm-ip0 { + cell-id = ; + label = "IP0"; + qcom,bcm-name = "IP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_pn0: bcm-pn0 { + cell-id = ; + label = "PN0"; + qcom,bcm-name = "PN0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_qp0: bcm-qp0 { + cell-id = ; + label = "QP0"; + qcom,bcm-name = "QP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh3: bcm-sh3 { + cell-id = ; + label = "SH3"; + qcom,bcm-name = "SH3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh4: bcm-sh4 { + cell-id = ; + label = "SH4"; + qcom,bcm-name = "SH4"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn0: bcm-sn0 { + cell-id = ; + label = "SN0"; + qcom,bcm-name = "SN0"; + 
qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn1: bcm-sn1 { + cell-id = ; + label = "SN1"; + qcom,bcm-name = "SN1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_pn1: bcm-pn1 { + cell-id = ; + label = "PN1"; + qcom,bcm-name = "PN1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_pn2: bcm-pn2 { + cell-id = ; + label = "PN2"; + qcom,bcm-name = "PN2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn3: bcm-sn3 { + cell-id = ; + label = "SN3"; + qcom,bcm-name = "SN3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_pn3: bcm-pn3 { + cell-id = ; + label = "PN3"; + qcom,bcm-name = "PN3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn4: bcm-sn4 { + cell-id = ; + label = "SN4"; + qcom,bcm-name = "SN4"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_pn5: bcm-pn5 { + cell-id = ; + label = "PN5"; + qcom,bcm-name = "PN5"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn6: bcm-sn6 { + cell-id = ; + label = "SN6"; + qcom,bcm-name = "SN6"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn7: bcm-sn7 { + cell-id = ; + label = "SN7"; + qcom,bcm-name = "SN7"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn8: bcm-sn8 { + cell-id = ; + label = "SN8"; + qcom,bcm-name = "SN8"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn9: bcm-sn9 { + cell-id = ; + label = "SN9"; + qcom,bcm-name = "SN9"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn10: bcm-sn10 { + cell-id = ; + label = "SN10"; + qcom,bcm-name = "SN10"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn11: bcm-sn11 { + cell-id = ; + label = "SN11"; + qcom,bcm-name = "SN11"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + + /*Buses*/ + fab_ipa_virt: fab-ipa_virt{ + cell-id = ; + label = "fab-ipa_virt"; + qcom,fab-dev; + qcom,base-name = "ipa_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mc_virt: fab-mc_virt{ + cell-id 
= ; + label = "fab-mc_virt"; + qcom,fab-dev; + qcom,base-name = "mc_virt-base"; + qcom,qos-off = <0>; + qcom,base-offset = <0>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mem_noc: fab-mem_noc{ + cell-id = ; + label = "fab-mem_noc"; + qcom,fab-dev; + qcom,base-name = "mem_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <65536>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_system_noc: fab-system_noc{ + cell-id = ; + label = "fab-system_noc"; + qcom,fab-dev; + qcom,base-name = "system_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <49152>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + + /*Masters*/ + + mas_ipa_core_master: mas-ipa-core-master { + cell-id = ; + label = "mas-ipa-core-master"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ipa_core_slave>; + qcom,bus-dev = <&fab_ipa_virt>; + }; + + mas_llcc_mc: mas-llcc-mc { + cell-id = ; + label = "mas-llcc-mc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ebi>; + qcom,bus-dev = <&fab_mc_virt>; + }; + + mas_acm_tcu: mas-acm-tcu { + cell-id = ; + label = "mas-acm-tcu"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns_llcc + &slv_qns_memnoc_snoc &slv_qns_sys_pcie>; + qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <6>; + }; + + mas_qnm_snoc_gc: mas-qnm-snoc-gc { + cell-id = ; + label = "mas-qnm-snoc-gc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <8>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_xm_apps_rdwr: mas-xm-apps-rdwr { + cell-id = ; + label = "mas-xm-apps-rdwr"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_llcc + &slv_qns_memnoc_snoc &slv_qns_sys_pcie>; + qcom,bus-dev = 
<&fab_mem_noc>; + qcom,bcms = <&bcm_sh3>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qhm_audio: mas-qhm-audio { + cell-id = ; + label = "mas-qhm-audio"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_aggre_noc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn2>; + }; + + mas_qhm_blsp1: mas-qhm-blsp1 { + cell-id = ; + label = "mas-qhm-blsp1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_aggre_noc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn3>; + }; + + mas_qhm_qdss_bam: mas-qhm-qdss-bam { + cell-id = ; + label = "mas-qhm-qdss-bam"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <11>; + qcom,connections = <&slv_qhs_snoc_cfg + &slv_qhs_emac_cfg &slv_qhs_usb3 + &slv_qhs_tlmm &slv_qhs_spmi_fetcher + &slv_qhs_qdss_cfg &slv_qhs_pdm + &slv_qns_snoc_memnoc &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_qhs_spmi_vgi_coex + &slv_qhs_qpic &slv_qxs_imem + &slv_qhs_ipa &slv_qhs_usb3_phy + &slv_qhs_aop &slv_qhs_blsp1 + &slv_qhs_sdc1 &slv_qhs_mss_cfg + &slv_qhs_pcie_parf &slv_qhs_ecc_cfg + &slv_qhs_audio &slv_qhs_aoss + &slv_qhs_prng &slv_qhs_crypto0_cfg + &slv_xs_sys_tcu_cfg &slv_qhs_clk_ctl + &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn8>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qhm_qpic: mas-qhm-qpic { + cell-id = ; + label = "mas-qhm-qpic"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_aoss + &slv_qhs_ipa &slv_qns_aggre_noc + &slv_qhs_aop &slv_qhs_audio>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn3>; + }; + + mas_qhm_snoc_cfg: mas-qhm-snoc-cfg { + cell-id = ; + label = "mas-qhm-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_snoc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + mas_qhm_spmi_fetcher1: mas-qhm-spmi-fetcher1 { + cell-id = ; + label = "mas-qhm-spmi-fetcher1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + 
qcom,connections = <&slv_qhs_aoss + &slv_qns_aggre_noc &slv_qhs_aop>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn2>; + }; + + mas_qnm_aggre_noc: mas-qnm-aggre-noc { + cell-id = ; + label = "mas-qnm-aggre-noc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <12>; + qcom,connections = <&slv_xs_pcie + &slv_qhs_snoc_cfg &slv_qhs_sdc1 + &slv_qhs_tlmm &slv_qhs_spmi_fetcher + &slv_qhs_qdss_cfg &slv_qhs_pdm + &slv_qns_snoc_memnoc &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_qhs_spmi_vgi_coex + &slv_xs_qdss_stm &slv_qhs_qpic + &slv_qxs_imem &slv_qhs_ipa + &slv_qhs_usb3_phy &slv_qhs_aop + &slv_qhs_blsp1 &slv_qhs_usb3 + &slv_qhs_mss_cfg &slv_qhs_pcie_parf + &slv_qhs_ecc_cfg &slv_qhs_apss + &slv_qhs_audio &slv_qhs_aoss + &slv_qhs_prng &slv_qhs_crypto0_cfg + &slv_xs_sys_tcu_cfg &slv_qhs_clk_ctl + &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn7>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qnm_ipa: mas-qnm-ipa { + cell-id = ; + label = "mas-qnm-ipa"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <5>; + qcom,connections = <&slv_qhs_snoc_cfg + &slv_qhs_emac_cfg &slv_qhs_usb3 + &slv_qhs_aoss &slv_qhs_spmi_fetcher + &slv_qhs_qdss_cfg &slv_qhs_pdm + &slv_qns_snoc_memnoc &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_xs_qdss_stm + &slv_qhs_qpic &slv_qxs_imem + &slv_qhs_ipa &slv_qhs_usb3_phy + &slv_qhs_aop &slv_qhs_blsp1 + &slv_qhs_sdc1 &slv_qhs_mss_cfg + &slv_qhs_pcie_parf &slv_qhs_ecc_cfg + &slv_qhs_audio &slv_qhs_tlmm + &slv_qhs_prng &slv_qhs_crypto0_cfg + &slv_qhs_clk_ctl &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn11>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qnm_memnoc: mas-qnm-memnoc { + cell-id = ; + label = "mas-qnm-memnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_snoc_cfg + &slv_qhs_emac_cfg &slv_qhs_usb3 + &slv_qhs_tlmm &slv_qhs_spmi_fetcher + &slv_qhs_qdss_cfg &slv_qhs_pdm + &slv_qhs_tcsr &slv_qhs_ddrss_cfg 
+ &slv_qhs_spmi_vgi_coex &slv_xs_qdss_stm + &slv_qhs_qpic &slv_qxs_imem + &slv_qhs_ipa &slv_qhs_usb3_phy + &slv_qhs_aop &slv_qhs_blsp1 + &slv_qhs_sdc1 &slv_qhs_mss_cfg + &slv_qhs_pcie_parf &slv_qhs_ecc_cfg + &slv_qhs_apss &slv_qhs_audio + &slv_qhs_aoss &slv_qhs_prng + &slv_qhs_crypto0_cfg &slv_xs_sys_tcu_cfg + &slv_qhs_clk_ctl &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn9>; + }; + + mas_qnm_memnoc_pcie: mas-qnm-memnoc-pcie { + cell-id = ; + label = "mas-qnm-memnoc-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_xs_pcie>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn10>; + }; + + mas_qxm_crypto: mas-qxm-crypto { + cell-id = ; + label = "mas-qxm-crypto"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qhs_aoss + &slv_qns_aggre_noc &slv_qhs_aop>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_ce0>, <&bcm_pn5>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_qxm_mss_nav_ce: mas-qxm-mss-nav-ce { + cell-id = ; + label = "mas-qxm-mss-nav-ce"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns_snoc_memnoc + &slv_qxs_imem &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn8>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_xm_emac: mas-xm-emac { + cell-id = ; + label = "mas-xm-emac"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <7>; + qcom,connections = <&slv_qns_aggre_noc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn7>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_ipa2pcie_slv: mas-xm-ipa2pcie-slv { + cell-id = ; + label = "mas-xm-ipa2pcie-slv"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <6>; + qcom,connections = <&slv_xs_pcie>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn11>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_pcie: mas-xm-pcie { + cell-id = ; + label = 
"mas-xm-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qns_aggre_noc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn7>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_xm_qdss_etr: mas-xm-qdss-etr { + cell-id = ; + label = "mas-xm-qdss-etr"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qhs_snoc_cfg + &slv_qhs_emac_cfg &slv_qhs_usb3 + &slv_qhs_tlmm &slv_qhs_spmi_fetcher + &slv_qhs_qdss_cfg &slv_qhs_pdm + &slv_qns_snoc_memnoc &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_qhs_spmi_vgi_coex + &slv_qhs_qpic &slv_qxs_imem + &slv_qhs_ipa &slv_qhs_usb3_phy + &slv_qhs_aop &slv_qhs_blsp1 + &slv_qhs_sdc1 &slv_qhs_mss_cfg + &slv_qhs_pcie_parf &slv_qhs_ecc_cfg + &slv_qhs_audio &slv_qhs_aoss + &slv_qhs_prng &slv_qhs_crypto0_cfg + &slv_xs_sys_tcu_cfg &slv_qhs_clk_ctl + &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn8>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_sdc1: mas-xm-sdc1 { + cell-id = ; + label = "mas-xm-sdc1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <8>; + qcom,connections = <&slv_qhs_aoss + &slv_qhs_ipa &slv_qns_aggre_noc + &slv_qhs_aop &slv_qhs_audio>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn1>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + mas_xm_usb3: mas-xm-usb3 { + cell-id = ; + label = "mas-xm-usb3"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <4>; + qcom,connections = <&slv_qns_aggre_noc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn7>; + qcom,ap-owned; + qcom,prio = <0>; + }; + + /*Internal nodes*/ + + /*Slaves*/ + + slv_ipa_core_slave:slv-ipa-core-slave { + cell-id = ; + label = "slv-ipa-core-slave"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_ipa_virt>; + qcom,bcms = <&bcm_ip0>; + }; + + slv_ebi:slv-ebi { + cell-id = ; + label = "slv-ebi"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + 
qcom,bus-dev = <&fab_mc_virt>; + qcom,bcms = <&bcm_mc0>; + }; + + slv_qns_llcc:slv-qns-llcc { + cell-id = ; + label = "slv-qns-llcc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mem_noc>; + qcom,connections = <&mas_llcc_mc>; + qcom,bcms = <&bcm_sh0>; + }; + + slv_qns_memnoc_snoc:slv-qns-memnoc-snoc { + cell-id = ; + label = "slv-qns-memnoc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mem_noc>; + qcom,connections = <&mas_qnm_memnoc>; + qcom,bcms = <&bcm_sh4>; + }; + + slv_qns_sys_pcie:slv-qns-sys-pcie { + cell-id = ; + label = "slv-qns-sys-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mem_noc>; + qcom,connections = <&mas_qnm_memnoc_pcie>; + qcom,bcms = <&bcm_sh4>; + }; + + slv_qhs_aop:slv-qhs-aop { + cell-id = ; + label = "slv-qhs-aop"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_aoss:slv-qhs-aoss { + cell-id = ; + label = "slv-qhs-aoss"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_apss:slv-qhs-apss { + cell-id = ; + label = "slv-qhs-apss"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_audio:slv-qhs-audio { + cell-id = ; + label = "slv-qhs-audio"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_blsp1:slv-qhs-blsp1 { + cell-id = ; + label = "slv-qhs-blsp1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_clk_ctl:slv-qhs-clk-ctl { + cell-id = ; + label = "slv-qhs-clk-ctl"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg { + cell-id = ; + label = "slv-qhs-crypto0-cfg"; + 
qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg { + cell-id = ; + label = "slv-qhs-ddrss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_ecc_cfg:slv-qhs-ecc-cfg { + cell-id = ; + label = "slv-qhs-ecc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_emac_cfg:slv-qhs-emac-cfg { + cell-id = ; + label = "slv-qhs-emac-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_imem_cfg:slv-qhs-imem-cfg { + cell-id = ; + label = "slv-qhs-imem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_ipa:slv-qhs-ipa { + cell-id = ; + label = "slv-qhs-ipa"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_mss_cfg:slv-qhs-mss-cfg { + cell-id = ; + label = "slv-qhs-mss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_pcie_parf:slv-qhs-pcie-parf { + cell-id = ; + label = "slv-qhs-pcie-parf"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_pdm:slv-qhs-pdm { + cell-id = ; + label = "slv-qhs-pdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_prng:slv-qhs-prng { + cell-id = ; + label = "slv-qhs-prng"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_qdss_cfg:slv-qhs-qdss-cfg { + cell-id = ; + label = "slv-qhs-qdss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = 
<&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_qpic:slv-qhs-qpic { + cell-id = ; + label = "slv-qhs-qpic"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_sdc1:slv-qhs-sdc1 { + cell-id = ; + label = "slv-qhs-sdc1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_snoc_cfg:slv-qhs-snoc-cfg { + cell-id = ; + label = "slv-qhs-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qhm_snoc_cfg>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_spmi_fetcher:slv-qhs-spmi-fetcher { + cell-id = ; + label = "slv-qhs-spmi-fetcher"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_spmi_vgi_coex:slv-qhs-spmi-vgi-coex { + cell-id = ; + label = "slv-qhs-spmi-vgi-coex"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_tcsr:slv-qhs-tcsr { + cell-id = ; + label = "slv-qhs-tcsr"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_tlmm:slv-qhs-tlmm { + cell-id = ; + label = "slv-qhs-tlmm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_usb3:slv-qhs-usb3 { + cell-id = ; + label = "slv-qhs-usb3"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qhs_usb3_phy:slv-qhs-usb3-phy { + cell-id = ; + label = "slv-qhs-usb3-phy"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_qns_aggre_noc:slv-qns-aggre-noc { + cell-id = ; + label = "slv-qns-aggre-noc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = 
<&fab_system_noc>; + qcom,connections = <&mas_qnm_aggre_noc>; + qcom,bcms = <&bcm_sn7>; + }; + + slv_qns_snoc_memnoc:slv-qns-snoc-memnoc { + cell-id = ; + label = "slv-qns-snoc-memnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc_gc>; + qcom,bcms = <&bcm_sn0>; + }; + + slv_qxs_imem:slv-qxs-imem { + cell-id = ; + label = "slv-qxs-imem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn1>; + }; + + slv_srvc_snoc:slv-srvc-snoc { + cell-id = ; + label = "slv-srvc-snoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_pn0>; + }; + + slv_xs_pcie:slv-xs-pcie { + cell-id = ; + label = "slv-xs-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn6>; + }; + + slv_xs_qdss_stm:slv-xs-qdss-stm { + cell-id = ; + label = "slv-xs-qdss-stm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn3>; + }; + + slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg { + cell-id = ; + label = "slv-xs-sys-tcu-cfg"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn4>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-gdsc.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-gdsc.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..3b07638aebc119b412e7805c2b13089323b003a3 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-gdsc.dtsi @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + /* GDSCs in Global CC */ + gdsc_usb30: qcom,gdsc@10b004 { + compatible = "qcom,gdsc"; + regulator-name = "gdsc_usb30"; + reg = <0x10b004 0x4>; + status = "disabled"; + }; + + gdsc_emac: qcom,gdsc@147004 { + compatible = "qcom,gdsc"; + regulator-name = "gdsc_emac"; + reg = <0x147004 0x4>; + status = "disabled"; + }; + + gdsc_pcie: qcom,qgdsc@137004 { + compatible = "qcom,gdsc"; + regulator-name = "gdsc_pcie"; + reg = <0x137004 0x4>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ba09309adbcf343adfc0a43e561c1a7c6590ace2 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi @@ -0,0 +1,195 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&soc { + /* Stub regulators */ + + /* PMXPRAIRIE S1 + S6 = VDD_MODEM supply */ + VDD_MODEM_LEVEL: S1E_LEVEL: + pmxprairie_s1_level: regulator-pmxprairie-s1-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s1_level"; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + S2E: pmxprairie_s2: regulator-pmxprairie-s2 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s2"; + regulator-min-microvolt = <1224000>; + regulator-max-microvolt = <1400000>; + }; + + S3E: pmxprairie_s3: regulator-pmxprairie-s3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s3"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1000000>; + }; + + S4E: pmxprairie_s4: regulator-pmxprairie-s4 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s4"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1956000>; + }; + + /* PMXPRAIRIE S5 = VDD_CX supply */ + VDD_CX_LEVEL_AO: S5E_LEVEL_AO: pmxprairie_s5_level_ao: + VDD_CX_LEVEL: S5E_LEVEL: + pmxprairie_s5_level: regulator-pmxprairie-s5-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s5_level"; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + /* PMXPRAIRIE S7 = VDD_MX supply */ + VDD_MX_LEVEL_AO: S7E_LEVEL_AO: pmxprairie_s7_level_ao: + VDD_MX_LEVEL: S7E_LEVEL: + pmxprairie_s7_level: regulator-pmxprairie-s7-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_s7_level"; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + L1E: pmxprairie_l1: regulator-pmxprairie-l1 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l1"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + qcom,hpm-min-load = <30000>; + }; + + L2E: pmxprairie_l2: regulator-pmxprairie-l2 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l2"; + regulator-min-microvolt = <1128000>; + 
regulator-max-microvolt = <1128000>; + qcom,hpm-min-load = <30000>; + }; + + L3E: pmxprairie_l3: regulator-pmxprairie-l3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l3"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + qcom,hpm-min-load = <30000>; + }; + + L4E: pmxprairie_l4: regulator-pmxprairie-l4 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l4"; + regulator-min-microvolt = <872000>; + regulator-max-microvolt = <872000>; + qcom,hpm-min-load = <30000>; + }; + + L5E: pmxprairie_l5: regulator-pmxprairie-l5 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l5"; + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1704000>; + qcom,hpm-min-load = <10000>; + }; + + L6E: pmxprairie_l6: regulator-pmxprairie-l6 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l6"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + qcom,hpm-min-load = <10000>; + }; + + L7E: pmxprairie_l7: regulator-pmxprairie-l7 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l7"; + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + qcom,hpm-min-load = <30000>; + }; + + L8E: pmxprairie_l8: regulator-pmxprairie-l8 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l8"; + regulator-min-microvolt = <480000>; + regulator-max-microvolt = <900000>; + qcom,hpm-min-load = <30000>; + }; + + L9E: pmxprairie_l9: regulator-pmxprairie-l9 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l9"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <800000>; + qcom,hpm-min-load = <30000>; + }; + + L10E: pmxprairie_l10: regulator-pmxprairie-l10 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l10"; + regulator-min-microvolt = <3088000>; + regulator-max-microvolt = <3088000>; + qcom,hpm-min-load = <10000>; + }; + + L11E: pmxprairie_l11: 
regulator-pmxprairie-l11 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l11"; + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <3000000>; + qcom,hpm-min-load = <10000>; + }; + + L12E: pmxprairie_l12: regulator-pmxprairie-l12 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l12"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + qcom,hpm-min-load = <30000>; + }; + + L13E: pmxprairie_l13: regulator-pmxprairie-l13 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l13"; + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <3000000>; + qcom,hpm-min-load = <10000>; + }; + + L14E: pmxprairie_l14: regulator-pmxprairie-l14 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l14"; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <800000>; + qcom,hpm-min-load = <30000>; + }; + + L15E: pmxprairie_l15: regulator-pmxprairie-l15 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l15"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + qcom,hpm-min-load = <30000>; + }; + + L16E: pmxprairie_l16: regulator-pmxprairie-l16 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmxprairie_l16"; + regulator-min-microvolt = <1704000>; + regulator-max-microvolt = <1904000>; + qcom,hpm-min-load = <10000>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi index 97d17fe9db290f90cc047bf43dea9c24dcdd9c99..8fb9a18219b1e4d240fd88d5759ebed0fc3e8b2d 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -63,6 +63,7 @@ soc: soc { }; }; +#include "sdxprairie-regulator.dtsi" &soc { #address-cells = <1>; @@ -201,6 +202,13 @@ qcom,rmnet-ipa-ssr; }; + qcom,ipa_fws { + compatible = "qcom,pil-tz-generic"; + qcom,pas-id = <0xf>; + qcom,firmware-name = "ipa_fws"; + qcom,pil-force-shutdown; 
+ }; + ipa_hw: qcom,ipa@01e00000 { compatible = "qcom,ipa"; reg = <0x1e00000 0xc0000>, @@ -256,4 +264,19 @@ #include "sdxprairie-pinctrl.dtsi" #include "sdxprairie-ion.dtsi" +#include "sdxprairie-bus.dtsi" #include "msm-arm-smmu-sdxprairie.dtsi" +#include "sdxprairie-gdsc.dtsi" + +&gdsc_usb30 { + status = "ok"; +}; + +&gdsc_emac { + status = "ok"; +}; + +&gdsc_pcie { + status = "ok"; +}; + diff --git a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi index f4883c37dbbdd733ecc6b05c31d7d4c4adb023ee..007d7d1a3bab96e2eb0d2fc750d0cec39b3493c8 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi @@ -60,6 +60,7 @@ <&clock_audio_rx_2 0>; qcom,rx-swr-gpios = <&rx_swr_gpios>; qcom,rx_mclk_mode_muxsel = <0x62c25020>; + qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>; swr1: rx_swr_master { compatible = "qcom,swr-mstr"; #address-cells = <2>; @@ -90,6 +91,7 @@ clocks = <&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>; qcom,wsa-swr-gpios = <&wsa_swr_gpios>; + qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>; swr0: wsa_swr_master { compatible = "qcom,swr-mstr"; #address-cells = <2>; @@ -138,11 +140,57 @@ clock-names = "va_core_clk"; clocks = <&clock_audio_va 0>; }; + + wcd937x_codec: wcd937x-codec { + compatible = "qcom,wcd937x-codec"; + qcom,rx_swr_ch_map = <0 HPH_L 0x1 0 HPH_L>, + <0 HPH_R 0x2 0 HPH_R>, <1 CLSH 0x3 0 CLSH>, + <2 COMP_L 0x1 0 COMP_L>, <2 COMP_R 0x2 0 COMP_R>, + <3 LO 0x1 0 LO>, <4 DSD_L 0x1 0 DSD_L>, + <4 DSD_R 0x2 0 DSD_R>; + qcom,tx_swr_ch_map = <0 ADC1 0x1 0 ADC1>, + <1 ADC2 0x1 0 ADC3>, <1 ADC3 0x2 0 ADC4>, + <2 DMIC0 0x1 0 DMIC0>, <2 DMIC1 0x2 0 DMIC1>, + <2 MBHC 0x4 0 DMIC2>, <3 DMIC2 0x1 0 DMIC4>, + <3 DMIC3 0x2 0 DMIC5>, <3 DMIC4 0x4 0 DMIC6>, + <3 DMIC5 0x8 0 DMIC7>; + + qcom,wcd-rst-gpio-node = <&wcd937x_rst_gpio>; + qcom,rx-slave = <&wcd937x_rx_slave>; + qcom,tx-slave = <&wcd937x_tx_slave>; + + cdc-vdd-ldo-rxtx-supply = 
<&L10A>; + qcom,cdc-vdd-ldo-rxtx-voltage = <1800000 1800000>; + qcom,cdc-vdd-ldo-rxtx-current = <25000>; + + cdc-vddpx-1-supply = <&L10A>; + qcom,cdc-vddpx-1-voltage = <1800000 1800000>; + qcom,cdc-vddpx-1-current = <10000>; + + cdc-vdd-buck-supply = <&L15A>; + qcom,cdc-vdd-buck-voltage = <1800000 1800000>; + qcom,cdc-vdd-buck-current = <650000>; + + cdc-vdd-mic-bias-supply = <&BOB>; + qcom,cdc-vdd-mic-bias-voltage = <3296000 3296000>; + qcom,cdc-vdd-mic-bias-current = <25000>; + + qcom,cdc-micbias1-mv = <1800>; + qcom,cdc-micbias2-mv = <1800>; + qcom,cdc-micbias3-mv = <1800>; + + qcom,cdc-static-supplies = "cdc-vdd-ldo-rxtx", + "cdc-vddpx-1", + "cdc-vdd-buck", + "cdc-vdd-mic-bias"; + }; + }; &sm6150_snd { qcom,model = "sm6150-idp-snd-card"; qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>; + qcom,ext-disp-audio-rx = <1>; qcom,audio-routing = "AMIC2", "MIC BIAS2", "MIC BIAS2", "Analog Mic2", @@ -183,61 +231,20 @@ qcom,msm-mbhc-gnd-swh = <1>; qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; - asoc-codec = <&stub_codec>, <&bolero>; - asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + asoc-codec = <&stub_codec>, <&bolero>, <&ext_disp_audio_codec>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec", + "msm-ext-disp-audio-codec-rx"; qcom,wsa-max-devs = <2>; qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, <&wsa881x_0213>, <&wsa881x_0214>; qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", "SpkrLeft", "SpkrRight"; qcom,codec-aux-devs = <&wcd937x_codec>; + qcom,msm_audio_ssr_devs = <&audio_apr>, <&q6core>, + <&lpi_tlmm>, <&bolero>; }; &soc { - wcd937x_codec: wcd937x-codec { - compatible = "qcom,wcd937x-codec"; - qcom,rx_swr_ch_map = <0 HPH_L 0x1 0 HPH_L>, - <0 HPH_R 0x2 0 HPH_R>, <1 CLSH 0x3 0 CLSH>, - <2 COMP_L 0x1 0 COMP_L>, <2 COMP_R 0x2 0 COMP_R>, - <3 LO 0x1 0 LO>, <4 DSD_L 0x1 0 DSD_L>, - <4 DSD_R 0x2 0 DSD_R>; - qcom,tx_swr_ch_map = <0 ADC1 0x1 0 ADC1>, - <1 ADC2 0x1 0 ADC3>, <1 ADC3 0x2 0 ADC4>, - <2 DMIC0 0x1 0 
DMIC0>, <2 DMIC1 0x2 0 DMIC1>, - <2 MBHC 0x4 0 DMIC2>, <3 DMIC2 0x1 0 DMIC4>, - <3 DMIC3 0x2 0 DMIC5>, <3 DMIC4 0x4 0 DMIC6>, - <3 DMIC5 0x8 0 DMIC7>; - - qcom,wcd-rst-gpio-node = <&wcd937x_rst_gpio>; - qcom,rx-slave = <&wcd937x_rx_slave>; - qcom,tx-slave = <&wcd937x_tx_slave>; - - cdc-vdd-ldo-rxtx-supply = <&L10A>; - qcom,cdc-vdd-ldo-rxtx-voltage = <1800000 1800000>; - qcom,cdc-vdd-ldo-rxtx-current = <25000>; - - cdc-vddpx-1-supply = <&L10A>; - qcom,cdc-vddpx-1-voltage = <1800000 1800000>; - qcom,cdc-vddpx-1-current = <10000>; - - cdc-vdd-buck-supply = <&L15A>; - qcom,cdc-vdd-buck-voltage = <1800000 1800000>; - qcom,cdc-vdd-buck-current = <650000>; - - cdc-vdd-mic-bias-supply = <&BOB>; - qcom,cdc-vdd-mic-bias-voltage = <3296000 3296000>; - qcom,cdc-vdd-mic-bias-current = <25000>; - - qcom,cdc-micbias1-mv = <1800>; - qcom,cdc-micbias2-mv = <1800>; - qcom,cdc-micbias3-mv = <1800>; - - qcom,cdc-static-supplies = "cdc-vdd-ldo-rxtx", - "cdc-vddpx-1", - "cdc-vdd-buck", - "cdc-vdd-mic-bias"; - }; - cdc_dmic01_gpios: cdc_dmic01_pinctrl { compatible = "qcom,msm-cdc-pinctrl"; pinctrl-names = "aud_active", "aud_sleep"; @@ -367,7 +374,7 @@ clock_audio_rx_1: rx_core_clk { compatible = "qcom,audio-ref-clk"; qcom,codec-ext-clk-src = ; - qcom,codec-lpass-ext-clk-freq = <19200000>; + qcom,codec-lpass-ext-clk-freq = <22579200>; qcom,codec-lpass-clk-id = <0x30E>; #clock-cells = <1>; }; @@ -375,7 +382,7 @@ clock_audio_rx_2: rx_npl_clk { compatible = "qcom,audio-ref-clk"; qcom,codec-ext-clk-src = ; - qcom,codec-lpass-ext-clk-freq = <19200000>; + qcom,codec-lpass-ext-clk-freq = <22579200>; qcom,codec-lpass-clk-id = <0x30F>; #clock-cells = <1>; }; @@ -458,7 +465,7 @@ cdc-vdd-mic-bias-supply = <&BOB>; qcom,cdc-vdd-mic-bias-voltage = <3296000 3296000>; - qcom,cdc-vdd-mic-bias-current = <30400>; + qcom,cdc-vdd-mic-bias-current = <30400 1000001>; qcom,cdc-static-supplies = "cdc-vdd-buck", "cdc-buck-sido", diff --git a/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi 
b/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi index 3597b81d2608c989bb927ea8a2703b499dcb5cb9..5649e385daabbf183ffd9f17a4fb5fc8bcde5e8b 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-audio.dtsi @@ -134,6 +134,7 @@ "msm-dai-cdc-dma-dev.45115", "msm-dai-cdc-dma-dev.45116", "msm-dai-cdc-dma-dev.45118"; + fsa4480-i2c-handle = <&fsa4480>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-adp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-adp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..30153ed84ec4052a08f3cf486dbf39008db8f9fa --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-adp.dtsi @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&cam_cci { + qcom,cam-res-mgr { + compatible = "qcom,cam-res-mgr"; + status = "ok"; + }; + + qcom,cam-sensor@0 { + cell-index = <0>; + compatible = "qcom,cam-sensor"; + reg = <0x0>; + csiphy-sd-index = <0>; + sensor-position-roll = <90>; + sensor-position-pitch = <0>; + sensor-position-yaw = <180>; + + + cam_vio-supply = <&pm6155_1_s4>; + cam_vana-supply = <&pm6155_1_s4>; + cam_vdig-supply = <&pm6155_1_s4>; + cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + + gpios = <&tlmm 43 0>, + <&tlmm 28 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0"; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK0_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + qcom,cam-sensor@1 { + cell-index = <1>; + compatible = "qcom,cam-sensor"; + reg = <0x1>; + csiphy-sd-index = <1>; + sensor-position-roll = <90>; + sensor-position-pitch = <0>; + sensor-position-yaw = <180>; + + cam_vio-supply = <&pm6155_1_s4>; + cam_vana-supply = <&pm6155_1_s4>; + cam_vdig-supply = <&pm6155_1_s4>; + cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + + gpios = <&tlmm 31 0>, + <&tlmm 29 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1"; + sensor-mode = <0>; + cci-master = <0>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK1_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; + + qcom,cam-sensor@2 { + cell-index = <2>; + compatible = "qcom,cam-sensor"; + reg = <0x02>; + csiphy-sd-index = <2>; + sensor-position-roll = <270>; + sensor-position-pitch = <0>; + sensor-position-yaw = <0>; + + cam_vio-supply = <&pm6155_1_s4>; + cam_vana-supply = <&pm6155_1_s4>; + cam_vdig-supply = 
<&pm6155_1_s4>; + cam_clk-supply = <&titan_top_gdsc>; + regulator-names = "cam_vio", "cam_vana", "cam_vdig", + "cam_clk"; + gpios = <&tlmm 31 0>, + <&tlmm 30 0>; + gpio-reset = <1>; + gpio-req-tbl-num = <0 1>; + gpio-req-tbl-flags = <1 0>; + gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2"; + sensor-mode = <0>; + cci-master = <1>; + status = "ok"; + clocks = <&clock_camcc CAM_CC_MCLK2_CLK>; + clock-names = "cam_clk"; + clock-cntl-level = "turbo"; + clock-rates = <24000000>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi index b3e5fee63fce6c92f7764ea2fc2cc8cb1c4db8cc..6896e568e4272ceebdec3840249b9d7f74087127 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-camera-sensor-idp.dtsi @@ -17,9 +17,9 @@ cell-index = <0>; reg = <0x00 0x00>; compatible = "qcom,camera-flash"; - flash-source = <&pm6150l_flash0>; - torch-source = <&pm6150l_torch1>; - switch-source = <&pm6150l_switch0>; + flash-source = <&pm6150l_flash0 &pm6150l_flash1>; + torch-source = <&pm6150l_torch0 &pm6150l_torch1>; + switch-source = <&pm6150l_switch2 &pm6150l_switch2>; status = "ok"; }; @@ -27,9 +27,9 @@ cell-index = <1>; reg = <0x01 0x00>; compatible = "qcom,camera-flash"; - flash-source = <&pm6150l_flash1>; - torch-source = <&pm6150l_torch1>; - switch-source = <&pm6150l_switch1>; + flash-source = <&pm6150l_flash0 &pm6150l_flash1>; + torch-source = <&pm6150l_torch0 &pm6150l_torch1>; + switch-source = <&pm6150l_switch2 &pm6150l_switch2>; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi b/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi index 626215292c82ba280e2c30d723af1c91649da7fd..70a0a54950acfe5920b5d10eaf7885743840f53e 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-camera.dtsi @@ -625,9 +625,9 @@ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>, <&clock_camcc CAM_CC_IFE_0_AXI_CLK>; clock-rates = - <0 0 
0 0 0 0 200000000 0 0 0 360000000 0 0>, + <0 0 0 0 0 0 320000000 0 0 0 432000000 0 0>, <0 0 0 0 0 0 540000000 0 0 0 600000000 0 0>; - clock-cntl-level = "svs", "turbo"; + clock-cntl-level = "svs_l1", "turbo"; src-clock-name = "ife_csid_clk_src"; status = "ok"; }; @@ -712,9 +712,9 @@ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>, <&clock_camcc CAM_CC_IFE_1_AXI_CLK>; clock-rates = - <0 0 0 0 0 0 200000000 0 0 0 360000000 0 0>, + <0 0 0 0 0 0 320000000 0 0 0 432000000 0 0>, <0 0 0 0 0 0 540000000 0 0 0 600000000 0 0>; - clock-cntl-level = "svs", "turbo"; + clock-cntl-level = "svs_l1", "turbo"; src-clock-name = "ife_csid_clk_src"; status = "ok"; }; @@ -796,9 +796,9 @@ <&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>, <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>; clock-rates = - <0 0 0 0 0 0 200000000 0 0 0 360000000 0>, + <0 0 0 0 0 0 320000000 0 0 0 432000000 0 0>, <0 0 0 0 0 0 540000000 0 0 0 600000000 0>; - clock-cntl-level = "svs", "turbo"; + clock-cntl-level = "svs_l1", "turbo"; src-clock-name = "ife_csid_clk_src"; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts index efab136e8944b8a68da7bac41001d8bb52fac80c..8ac43d86f11f1e8d1f93a2ca6e4e72620ef9933d 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp-overlay.dts @@ -20,7 +20,7 @@ #include "sm6150-audio-overlay.dtsi" / { - model = "Qualcomm Technologies, Inc. 
SM6150 Command mode display IDP"; + model = "Command mode display IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,msm-id = <355 0x0>; qcom,board-id = <34 3>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp.dts b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp.dts index c8c7110cbebe509759a9579ff17ff421763807f7..3cc83ced91fbbd4b1ceb797921f775a3a0366c24 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-cmd-mode-display-idp.dts @@ -16,7 +16,7 @@ #include "sm6150-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 Command mode display IDP"; + model = "Qualcomm Technologies, Inc. SM6150 PM6150 Command mode display IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,board-id = <34 3>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi index be03427c5ba02cb1217a5d42e152796299813fe1..76312ea936ce4d5fface212e936fef0f0917ba82 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi @@ -520,10 +520,10 @@ port@1 { reg = <0>; - funnel_monaq_in_tpdm_dl_monaq: endpoint { + funnel_monaq_in_tpdm_monaq: endpoint { slave-mode; remote-endpoint = - <&tpdm_dl_monaq_out_funnel_monaq>; + <&tpdm_monaq_out_funnel_monaq>; }; }; }; @@ -703,21 +703,21 @@ }; }; - tpdm_dl_monaq: tpdm@69c0000 { + tpdm_monaq: tpdm@69c0000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x0003b968>; reg = <0x69c0000 0x1000>; reg-names = "tpdm-base"; - coresight-name = "coresight-tpdm-dl-monaq"; + coresight-name = "coresight-tpdm-monaq"; clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; port { - tpdm_dl_monaq_out_funnel_monaq: endpoint { + tpdm_monaq_out_funnel_monaq: endpoint { remote-endpoint = - <&funnel_monaq_in_tpdm_dl_monaq>; + <&funnel_monaq_in_tpdm_monaq>; }; }; }; @@ -1171,15 +1171,6 @@ }; port@1 { - reg = <5>; - funnel_swao_in_funnel_ssc: endpoint 
{ - slave-mode; - remote-endpoint= - <&funnel_ssc_out_funnel_swao>; - }; - }; - - port@2 { reg = <6>; funnel_swao_in_replicator1_out: endpoint { slave-mode; @@ -1187,7 +1178,8 @@ <&replicator1_out_funnel_swao>; }; }; - port@3 { + + port@2 { reg = <7>; funnel_swao_in_tpda_swao: endpoint { slave-mode; @@ -1198,82 +1190,6 @@ }; }; - funnel_ssc: funnel@6b14000 { - compatible = "arm,primecell"; - arm,primecell-periphid = <0x0003b908>; - - reg = <0x6b14000 0x1000>; - reg-names = "funnel-base"; - - coresight-name = "coresight-funnel-ssc"; - - clocks = <&clock_aop QDSS_CLK>; - clock-names = "apb_pclk"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - funnel_ssc_out_funnel_swao: endpoint { - remote-endpoint = - <&funnel_swao_in_funnel_ssc>; - }; - }; - - port@1 { - reg = <0>; - funnel_ssc_in_ssc_etm0: endpoint { - slave-mode; - remote-endpoint = - <&ssc_etm0_out_funnel_ssc>; - }; - }; - - port@2 { - reg = <0>; - funnel_ssc_in_ssc_stm: endpoint { - slave-mode; - remote-endpoint = - <&ssc_stm_out_funnel_ssc>; - }; - }; - }; - }; - - ssc_stm: stm@6b13000 { - compatible = "arm,primecell"; - arm,primecell-periphid = <0x0003b962>; - - reg = <0x06b13000 0x1000>; - reg-names = "stm-base"; - coresight-name = "coresight-ssc-stm"; - - clocks = <&clock_aop QDSS_CLK>; - clock-names = "apb_pclk"; - - port { - ssc_stm_out_funnel_ssc: endpoint { - remote-endpoint = <&funnel_ssc_in_ssc_stm>; - }; - }; - }; - - ssc_etm0 { - compatible = "qcom,coresight-remote-etm"; - - coresight-name = "coresight-ssc-etm0"; - qcom,inst-id = <8>; - - port { - ssc_etm0_out_funnel_ssc: endpoint { - remote-endpoint = - <&funnel_ssc_in_ssc_etm0>; - }; - }; - }; - tpda_swao: tpda@6b01000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x0003b969>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts index 
345251a025aee913861cf7fea90333600a975b15..473c59fdefe8903e1144c9061e36ec42df5b8c52 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp-overlay.dts @@ -21,7 +21,7 @@ #include "sm6150-external-codec.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 External Audio Codec IDP"; + model = "External Audio Codec IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,msm-id = <355 0x0>; qcom,board-id = <34 1>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp.dts index d86f77e2e1a7d60e2f6099504170e865f95d6c9e..47cef042e62cebdcb4695e95d3c8892999e14320 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-external-codec-idp.dts @@ -18,7 +18,7 @@ #include "sm6150-external-codec.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 External Audio Codec IDP"; + model = "Qualcomm Technologies, Inc. 
SM6150 PM6150 External Audio Codec IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,board-id = <34 1>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi b/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi index cf0c52ce43b3592b9e9bd5c97f422e98abe09a40..04b1fdd55ec43ffe10cfadd699ef274cfdcec8f6 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-external-codec.dtsi @@ -17,6 +17,7 @@ &sm6150_snd { qcom,model = "sm6150-tavil-snd-card"; qcom,tavil_codec = <1>; + qcom,ext-disp-audio-rx = <1>; qcom,audio-routing = "AIF4 VI", "MCLK", "RX_BIAS", "MCLK", @@ -91,8 +92,8 @@ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", "msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929"; - asoc-codec = <&stub_codec>; - asoc-codec-names = "msm-stub-codec.1"; + asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>; + asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx"; qcom,hph-en0-gpio = <&tavil_hph_en0>; qcom,hph-en1-gpio = <&tavil_hph_en1>; qcom,wsa-max-devs = <2>; @@ -100,6 +101,8 @@ <&wsa881x_13>, <&wsa881x_14>; qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", "SpkrLeft", "SpkrRight"; + qcom,msm_audio_ssr_devs = <&audio_apr>, <&wcd934x_cdc>, + <&q6core>, <&lpi_tlmm>; }; &slim_aud { diff --git a/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi index 2fdcc6aedc36fb0afa2e3bc57035fc5078497a02..d8670e75108796773daef8810820d51d78a0f491 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi @@ -98,6 +98,8 @@ /* GDSC oxili regulators */ vddcx-supply = <&gpu_cx_gdsc>; vdd-supply = <&gpu_gx_gdsc>; + /* Cx ipeak limit support */ + qcom,gpu-cx-ipeak = <&cx_ipeak_lm 1>; /* CPU latency parameter */ qcom,pm-qos-active-latency = <67>; @@ -162,7 +164,7 @@ qcom,speed-bin = <0>; - qcom,initial-pwrlevel = <3>; + qcom,initial-pwrlevel = <5>; qcom,ca-target-pwrlevel = <3>; /* SVS_L1 
*/ @@ -177,7 +179,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@1 { reg = <1>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; @@ -186,7 +188,7 @@ /* NOM */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -195,7 +197,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = <513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -204,7 +206,7 @@ /* SVS */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; @@ -235,7 +237,7 @@ qcom,speed-bin = <177>; - qcom,initial-pwrlevel = <3>; + qcom,initial-pwrlevel = <5>; qcom,ca-target-pwrlevel = <3>; /* SVS_L1 */ @@ -250,7 +252,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@1 { reg = <1>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; @@ -259,7 +261,7 @@ /* NOM */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -268,7 +270,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = <513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -277,7 +279,7 @@ /* SVS */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; @@ -308,7 +310,7 @@ qcom,speed-bin = <187>; - qcom,initial-pwrlevel = <3>; + qcom,initial-pwrlevel = <6>; qcom,ca-target-pwrlevel = <4>; qcom,gpu-pwrlevel@0 { @@ -330,7 +332,7 @@ /* NOM L1 */ qcom,gpu-pwrlevel@2 { reg = <2>; - qcom,gpu-freq = <706000000>; + qcom,gpu-freq = <745000000>; qcom,bus-freq = <10>; qcom,bus-min = <9>; 
qcom,bus-max = <11>; @@ -339,7 +341,7 @@ /* NOM */ qcom,gpu-pwrlevel@3 { reg = <3>; - qcom,gpu-freq = <645000000>; + qcom,gpu-freq = <700000000>; qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; @@ -348,7 +350,7 @@ /* SVS L1 */ qcom,gpu-pwrlevel@4 { reg = <4>; - qcom,gpu-freq = <513000000>; + qcom,gpu-freq = <550000000>; qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; @@ -357,7 +359,7 @@ /* SVS */ qcom,gpu-pwrlevel@5 { reg = <5>; - qcom,gpu-freq = <400000000>; + qcom,gpu-freq = <435000000>; qcom,bus-freq = <7>; qcom,bus-min = <5>; qcom,bus-max = <8>; @@ -414,4 +416,32 @@ iommus = <&kgsl_smmu 0x2 0x400>; }; }; + + rgmu: qcom,rgmu@0x0506d000 { + label = "kgsl-rgmu"; + compatible = "qcom,gpu-rgmu"; + + reg = <0x506d000 0x31000>; + reg-names = "kgsl_rgmu"; + + interrupts = <0 304 0>, <0 305 0>; + interrupt-names = "kgsl_oob", "kgsl_rgmu"; + + regulator-names = "vddcx", "vdd"; + vddcx-supply = <&gpu_cx_gdsc>; + vdd-supply = <&gpu_gx_gdsc>; + + clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>, + <&clock_gpucc GPU_CC_CXO_CLK>, + <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, + <&clock_gpucc GPU_CC_AHB_CLK>, + <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, + <&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>, + <&clock_gpucc GPU_CC_GX_GFX3D_CLK>; + + clock-names = "gmu", "rbbmtimer", "mem", + "iface", "mem_iface", + "alt_mem_iface", "core"; + + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts index 9eaf6e4711efe9685a47a4a79a1278d498a99130..d8bb219b60ed245fbfc40ace995ecf43fb83eec0 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts @@ -20,7 +20,7 @@ #include "sm6150-audio-overlay.dtsi" / { - model = "Qualcomm Technologies, Inc. 
SM6150 IDP"; + model = "IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,msm-id = <355 0x0>; qcom,board-id = <34 0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp.dts b/arch/arm64/boot/dts/qcom/sm6150-idp.dts index ca1ac2c999370fc9be9211d2b851f43e5b63f28e..66014ab30161cfce54de8af9b3dc2aadd9ebde03 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-idp.dts @@ -16,7 +16,7 @@ #include "sm6150-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 IDP"; + model = "Qualcomm Technologies, Inc. SM6150 PM6150 IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,board-id = <34 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi index 193b92ec447852fb990d58a2706776245f28c06e..d3532c0aed6239d8265a1d0c377b2570600d9986 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi @@ -186,10 +186,14 @@ &pm6150_charger { io-channels = <&pm6150_vadc ADC_USB_IN_V_16>, <&pm6150_vadc ADC_USB_IN_I>, - <&pm6150_vadc ADC_CHG_TEMP>; + <&pm6150_vadc ADC_CHG_TEMP>, + <&pm6150_vadc ADC_DIE_TEMP>, + <&pm6150_vadc ADC_AMUX_THM4_PU2>; io-channel-names = "usb_in_voltage", "usb_in_current", - "chg_temp"; + "chg_temp", + "die_temp", + "conn_temp"; qcom,battery-data = <&mtp_batterydata>; qcom,step-charging-enable; qcom,sw-jeita-enable; @@ -259,3 +263,9 @@ qcom,platform-te-gpio = <&tlmm 90 0>; qcom,platform-reset-gpio = <&tlmm 91 0>; }; + +&thermal_zones { + quiet-therm-step { + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi index c8e19f7ac8ff112ddfe88759effac85de8725a42..2b30158946bf74db68bb450f32a068e5054715e9 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi @@ -122,7 +122,7 @@ config { pins = "gpio4", "gpio5"; drive-strength = <2>; - bias-pull-up; + bias-no-pull; }; }; }; @@ 
-188,6 +188,47 @@ }; }; + fpc_reset_int { + fpc_reset_low: reset_low { + mux { + pins = "gpio101"; + function = "fpc_reset_gpio_low"; + }; + config { + pins = "gpio101"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + + fpc_reset_high: reset_high { + mux { + pins = "gpio101"; + function = "fpc_reset_gpio_high"; + }; + + config { + pins = "gpio101"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; + + fpc_int_low: int_low { + mux { + pins = "gpio93"; + }; + config { + pins = "gpio93"; + drive-strength = <2>; + bias-pull-down; + input-enable; + }; + }; + }; + /* SE 3 pin mappings */ qupv3_se3_i2c_pins: qupv3_se3_i2c_pins { qupv3_se3_i2c_active: qupv3_se3_i2c_active { @@ -827,37 +868,6 @@ }; }; - /* USB C analog configuration */ - wcd_usbc_analog_en1 { - wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle { - mux { - pins = "gpio49"; - function = "gpio"; - }; - - config { - pins = "gpio49"; - drive-strength = <2>; - bias-pull-down; - output-low; - }; - }; - - wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active { - mux { - pins = "gpio49"; - function = "gpio"; - }; - - config { - pins = "gpio49"; - drive-strength = <2>; - bias-disable; - output-high; - }; - }; - }; - wsa_swr_clk_pin { wsa_swr_clk_sleep: wsa_swr_clk_sleep { mux { @@ -1403,6 +1413,217 @@ }; }; }; + emac { + emac_mdc: emac_mdc { + mux { + pins = "gpio113"; + function = "rgmii_mdc"; + }; + + config { + pins = "gpio113"; + bias-pull-up; + }; + }; + emac_mdio: emac_mdio { + mux { + pins = "gpio114"; + function = "rgmii_mdio"; + }; + + config { + pins = "gpio114"; + bias-pull-up; + }; + }; + + emac_rgmii_txd0: emac_rgmii_txd0 { + mux { + pins = "gpio96"; + function = "rgmii_txd0"; + }; + + config { + pins = "gpio96"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + emac_rgmii_txd1: emac_rgmii_txd1 { + mux { + pins = "gpio95"; + function = "rgmii_txd1"; + }; + + config { + pins = "gpio95"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + emac_rgmii_txd2: 
emac_rgmii_txd2 { + mux { + pins = "gpio94"; + function = "rgmii_txd2"; + }; + + config { + pins = "gpio94"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_txd3: emac_rgmii_txd3 { + mux { + pins = "gpio93"; + function = "rgmii_txd3"; + }; + + config { + pins = "gpio93"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_txc: emac_rgmii_txc { + mux { + pins = "gpio92"; + function = "rgmii_txc"; + }; + + config { + pins = "gpio92"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_tx_ctl: emac_rgmii_tx_ctl { + mux { + pins = "gpio97"; + function = "rgmii_tx"; + }; + + config { + pins = "gpio97"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + + emac_rgmii_rxd0: emac_rgmii_rxd0 { + mux { + pins = "gpio83"; + function = "rgmii_rxd0"; + }; + + config { + pins = "gpio83"; + bias-disable; /* NO pull */ + drive-strength = <2>; /* 2MA */ + }; + }; + + emac_rgmii_rxd1: emac_rgmii_rxd1 { + mux { + pins = "gpio82"; + function = "rgmii_rxd1"; + }; + + config { + pins = "gpio82"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + + emac_rgmii_rxd2: emac_rgmii_rxd2 { + mux { + pins = "gpio81"; + function = "rgmii_rxd2"; + }; + + config { + pins = "gpio81"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_rgmii_rxd3: emac_rgmii_rxd3 { + mux { + pins = "gpio103"; + function = "rgmii_rxd3"; + }; + + config { + pins = "gpio103"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_rgmii_rxc: emac_rgmii_rxc { + mux { + pins = "gpio102"; + function = "rgmii_rxc"; + }; + + config { + pins = "gpio102"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_rgmii_rx_ctl: emac_rgmii_rx_ctl { + mux { + pins = "gpio112"; + function = "rgmii_rx"; + }; + + config { + pins = "gpio112"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_phy_intr: emac_phy_intr { + mux { + pins = "gpio121"; + function = "emac_phy"; + }; + + config { + pins = "gpio121"; + 
bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_phy_reset_state: emac_phy_reset_state { + mux { + pins = "gpio104"; + function = "gpio"; + }; + + config { + pins = "gpio104"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_pin_pps_0: emac_pin_pps_0 { + mux { + pins = "gpio91"; + function = "rgmii_sync"; + }; + + config { + pins = "gpio91"; + bias-pull-up; + drive-strength = <16>; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts index 798260de18464dbff3bcb3744546c6f50b00b82d..0ad6d57baf8c5cdfecc0243066a5c820dc839d7a 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-qrd-overlay.dts @@ -20,7 +20,7 @@ #include "sm6150-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 QRD"; + model = "QRD"; compatible = "qcom,sm6150-qrd", "qcom,sm6150", "qcom,qrd"; qcom,board-id = <11 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-qrd.dts b/arch/arm64/boot/dts/qcom/sm6150-qrd.dts index 8be78315c2665b48fbb8e6439225d596bbf8e3ac..2be0c3b8ed4067b346c720665206b3af87e1fd87 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-qrd.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-qrd.dts @@ -16,7 +16,7 @@ #include "sm6150-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 QRD"; + model = "Qualcomm Technologies, Inc. 
SM6150 PM6150 QRD"; compatible = "qcom,sm6150-qrd", "qcom,sm6150", "qcom,qrd"; qcom,board-id = <11 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi index ea391e03ac8bce8fca85dd22123f8679b292123e..6efcdcface298dfd75baffa2f1d785e2af379bfa 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-qrd.dtsi @@ -107,10 +107,14 @@ &pm6150_charger { io-channels = <&pm6150_vadc ADC_USB_IN_V_16>, <&pm6150_vadc ADC_USB_IN_I>, - <&pm6150_vadc ADC_CHG_TEMP>; + <&pm6150_vadc ADC_CHG_TEMP>, + <&pm6150_vadc ADC_DIE_TEMP>, + <&pm6150_vadc ADC_AMUX_THM4_PU2>; io-channel-names = "usb_in_voltage", "usb_in_current", - "chg_temp"; + "chg_temp", + "die_temp", + "conn_temp"; qcom,battery-data = <&mtp_batterydata>; qcom,step-charging-enable; qcom,sw-jeita-enable; @@ -219,3 +223,24 @@ status = "ok"; }; + +&soc { + fpc1020 { + compatible = "fpc,fpc1020"; + interrupt-parent = <&tlmm>; + interrupts = <93 0>; + fpc,gpio_rst = <&tlmm 101 0x0>; + fpc,gpio_irq = <&tlmm 93 0>; + vcc_spi-supply = <&pm6150_l10>; + vdd_io-supply = <&pm6150_l10>; + vdd_ana-supply = <&pm6150_l10>; + fpc,enable-on-boot; + pinctrl-names = "fpc1020_reset_reset", + "fpc1020_reset_active", + "fpc1020_irq_active"; + pinctrl-0 = <&fpc_reset_low>; + pinctrl-1 = <&fpc_reset_high>; + pinctrl-2 = <&fpc_int_low>; + }; + +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts index 40685f9ae444e821504a6b96869f95ddaa0fa7e1..cafec923b09861d91699c7297672df12f23323b4 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts @@ -19,7 +19,7 @@ #include "sm6150-rumi.dtsi" / { - model = "Qualcomm Technologies, Inc. 
SM6150 RUMI"; + model = "RUMI"; compatible = "qcom,sm6150-rumi", "qcom,sm6150", "qcom,rumi"; qcom,msm-id = <355 0x0>; qcom,board-id = <15 0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-rumi.dts b/arch/arm64/boot/dts/qcom/sm6150-rumi.dts index fd344ffd4673837efc62856656eb669c9f6df562..7ba8c195075ae31fc878f1c3ab78c4eb5b50491e 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-rumi.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-rumi.dts @@ -17,7 +17,7 @@ #include "sm6150-rumi.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 RUMI"; + model = "Qualcomm Technologies, Inc. SM6150 PM6150 RUMI"; compatible = "qcom,sm6150-rumi", "qcom,sm6150", "qcom,rumi"; qcom,board-id = <15 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi index fbc0be04ba6530b5386c8482d650212d071ef0cb..33ab2dafa7344ca33bf4351e6c7350adcaec27c7 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-sde-display.dtsi @@ -310,7 +310,7 @@ qcom,display-topology = <1 0 1>; qcom,default-topology-index = <0>; qcom,partial-update-enabled = "single_roi"; - qcom,panel-roi-alignment = <16 16 1 1 16 16>; + qcom,panel-roi-alignment = <16 16 8 2 16 16>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi b/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi index 047cc78c9215d3e369470205ea45d23d0fb43d13..953939f7917ccb579c84a4a65198b5f35ee19692 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-sde.dtsi @@ -35,6 +35,7 @@ "lut_clk", "rot_clk"; clock-rate = <0 0 0 256000000 19200000 192000000>; clock-max-rate = <0 0 0 307000000 19200000 307000000>; + qcom,dss-cx-ipeak = <&cx_ipeak_lm 3>; sde-vdd-supply = <&mdss_core_gdsc>; @@ -351,6 +352,9 @@ qcom,mdss-rot-danger-lut = <0x0 0x0>; qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>; + qcom,mdss-rot-qos-cpu-mask = <0xf>; + qcom,mdss-rot-qos-cpu-dma-latency = <75>; + qcom,mdss-default-ot-rd-limit = <32>; 
qcom,mdss-default-ot-wr-limit = <32>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi b/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi index bcc831e3c16d358ff76cf5033a6485b38aa5ae08..7f75ab1f3e9c03a15fa6c63778a381c48901d180 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi @@ -86,6 +86,12 @@ }; }; }; + + cxip_cdev: cxip-cdev@1fed000 { + compatible = "qcom,cxip-lm-cooling-device"; + reg = <0x1fed000 0x24>; + #cooling-cells = <2>; + }; }; &thermal_zones { @@ -396,23 +402,23 @@ thermal-governor = "step_wise"; thermal-sensors = <&tsens0 1>; trips { - cpu01_config: cpu01-config { + cpu45_config: cpu45-config { temperature = <110000>; hysteresis = <10000>; type = "passive"; }; }; cooling-maps { - cpu0_cdev { - trip = <&cpu01_config>; + cpu4_cdev { + trip = <&cpu45_config>; cooling-device = - <&CPU0 THERMAL_MAX_LIMIT + <&CPU4 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; - cpu1_cdev { - trip = <&cpu01_config>; + cpu5_cdev { + trip = <&cpu45_config>; cooling-device = - <&CPU1 THERMAL_MAX_LIMIT + <&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -452,23 +458,23 @@ thermal-sensors = <&tsens0 3>; thermal-governor = "step_wise"; trips { - cpu45_config: cpu45-config { + cpu01_config: cpu01-config { temperature = <110000>; hysteresis = <10000>; type = "passive"; }; }; cooling-maps { - cpu4_cdev { - trip = <&cpu45_config>; + cpu0_cdev { + trip = <&cpu01_config>; cooling-device = - <&CPU4 THERMAL_MAX_LIMIT + <&CPU0 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; - cpu5_cdev { - trip = <&cpu45_config>; + cpu1_cdev { + trip = <&cpu01_config>; cooling-device = - <&CPU5 THERMAL_MAX_LIMIT + <&CPU1 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>; }; }; @@ -1362,6 +1368,26 @@ }; }; + q6-hvx-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 10>; + thermal-governor = "step_wise"; + trips { + q6_hvx_config: q6-hvx-config { + temperature = <95000>; + hysteresis = <20000>; + type = "passive"; + }; + }; + 
cooling-maps { + cxip-cdev { + trip = <&q6_hvx_config>; + cooling-device = <&cxip_cdev 1 1>; + }; + }; + }; + xo-therm { polling-delay-passive = <0>; polling-delay = <0>; @@ -1459,4 +1485,145 @@ }; }; }; + + quiet-therm-step { + polling-delay-passive = <2000>; + polling-delay = <0>; + thermal-governor = "step_wise"; + thermal-sensors = <&pm6150l_adc_tm ADC_GPIO4_PU2>; + trips { + gold_trip: gold-trip { + temperature = <46000>; + hysteresis = <0>; + type = "passive"; + }; + skin_gpu_trip: skin-gpu-trip { + temperature = <48000>; + hysteresis = <0>; + type = "passive"; + }; + modem_trip0: modem-trip0 { + temperature = <48000>; + hysteresis = <4000>; + type = "passive"; + }; + modem_trip1_batt_trip0: modem-trip1 { + temperature = <50000>; + hysteresis = <4000>; + type = "passive"; + }; + silver_trip: silver-trip { + temperature = <52000>; + hysteresis = <0>; + type = "passive"; + }; + modem_trip2_batt_trip1: modem-trip2 { + temperature = <52000>; + hysteresis = <2000>; + type = "passive"; + }; + batt_trip2: batt-trip2 { + temperature = <54000>; + hysteresis = <2000>; + type = "passive"; + }; + modem_trip3: modem-trip3 { + temperature = <56000>; + hysteresis = <4000>; + type = "passive"; + }; + batt_trip3: batt-trip3 { + temperature = <56000>; + hysteresis = <2000>; + type = "passive"; + }; + }; + cooling-maps { + skin_cpu6 { + trip = <&gold_trip>; + cooling-device = + /* throttle from fmax to 1708800KHz */ + <&CPU6 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-9)>; + }; + skin_cpu7 { + trip = <&gold_trip>; + cooling-device = + <&CPU7 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-9)>; + }; + skin_cpu0 { + trip = <&silver_trip>; + /* throttle from fmax to 1516800KHz */ + cooling-device = <&CPU0 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_cpu1 { + trip = <&silver_trip>; + cooling-device = <&CPU1 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_cpu2 { + trip = <&silver_trip>; + cooling-device = <&CPU2 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_cpu3 { + trip = 
<&silver_trip>; + cooling-device = <&CPU3 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_cpu4 { + trip = <&silver_trip>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_cpu5 { + trip = <&silver_trip>; + cooling-device = <&CPU5 THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-7)>; + }; + skin_gpu { + trip = <&skin_gpu_trip>; + cooling-device = <&msm_gpu THERMAL_NO_LIMIT + (THERMAL_MAX_LIMIT-4)>; + }; + modem_lvl1 { + trip = <&modem_trip1_batt_trip0>; + cooling-device = <&modem_pa 1 1>; + }; + modem_lvl2 { + trip = <&modem_trip2_batt_trip1>; + cooling-device = <&modem_pa 2 2>; + }; + modem_lvl3 { + trip = <&modem_trip3>; + cooling-device = <&modem_pa 3 3>; + }; + modem_proc_lvl1 { + trip = <&modem_trip0>; + cooling-device = <&modem_proc 1 1>; + }; + modem_proc_lvl3 { + trip = <&modem_trip3>; + cooling-device = <&modem_proc 3 3>; + }; + battery_lvl0 { + trip = <&modem_trip1_batt_trip0>; + cooling-device = <&pm6150_charger 1 1>; + }; + battery_lvl1 { + trip = <&modem_trip2_batt_trip1>; + cooling-device = <&pm6150_charger 2 2>; + }; + battery_lvl2 { + trip = <&batt_trip2>; + cooling-device = <&pm6150_charger 4 4>; + }; + battery_lvl3 { + trip = <&batt_trip3>; + cooling-device = <&pm6150_charger 5 5>; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi index 035adb3fef537d0dc7d05f36615ecabf45c7ba82..6772c26d76899e038e99a35b8662006137e419bd 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-usb.dtsi @@ -160,7 +160,7 @@ qcom,major-rev = <1>; clocks = <&clock_rpmh RPMH_CXO_CLK>, - <&clock_gcc GCC_USB2_PRIM_CLKREF_CLK>, + <&clock_gcc GCC_RX1_USB2_CLKREF_CLK>, <&clock_gcc GCC_AHB2PHY_WEST_CLK>; clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk"; @@ -410,7 +410,7 @@ qcom,hold-reset; clocks = <&clock_rpmh RPMH_CXO_CLK>, - <&clock_gcc GCC_USB2_SEC_CLKREF_CLK>, + <&clock_gcc GCC_RX3_USB2_CLKREF_CLK>, <&clock_gcc GCC_AHB2PHY_WEST_CLK>; 
clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk"; diff --git a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts index 634d127d31eeb406e33619ea658cf5c58057f67f..c56081f944305bd26cda5e77f2d20f756ef0287e 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp-overlay.dts @@ -17,9 +17,10 @@ #include #include "sm6150-idp.dtsi" +#include "sm6150-usbc-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 USBC Audio IDP"; + model = "USBC Audio IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,msm-id = <355 0x0>; qcom,board-id = <34 2>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dts b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dts index 8bd5d4a576444bbb725ff0f0dd209ca004e1976c..dc1c415d21f2474a902f3dd7e8baf3d6b04dc7ab 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dts @@ -14,9 +14,10 @@ #include "sm6150.dtsi" #include "sm6150-idp.dtsi" +#include "sm6150-usbc-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 USBC Audio IDP"; + model = "Qualcomm Technologies, Inc. SM6150 PM6150 USBC Audio IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; qcom,board-id = <34 2>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..0437415f502fe854d8394a85a516e88bf19f8359 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-usbc-idp.dtsi @@ -0,0 +1,17 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm6150-audio-overlay.dtsi" + +&sm6150_snd { + qcom,msm-mbhc-usbc-audio-supported = <1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150.dts b/arch/arm64/boot/dts/qcom/sm6150.dts index 31c44c4c030536a8ea11e167a1d1dafc53cea0a6..8218575996501be47e86d53f89daa9a83bf3611b 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dts +++ b/arch/arm64/boot/dts/qcom/sm6150.dts @@ -17,5 +17,6 @@ / { model = "Qualcomm Technologies, Inc. SM6150 SoC"; compatible = "qcom,sm6150"; + qcom,pmic-name = "PM6150"; qcom,board-id = <0 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index bd949aefa80c377fc9f87c450ba2e546a35906b0..0178d0926f9292ed523f7ffe52acda8dcaf093ae 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -31,6 +31,7 @@ / { model = "Qualcomm Technologies, Inc. 
SM6150"; compatible = "qcom,sm6150"; + qcom,msm-name = "SM6150"; qcom,msm-id = <355 0x0>; interrupt-parent = <&pdc>; @@ -628,7 +629,7 @@ alloc-ranges = <0 0x00000000 0 0xffffffff>; reusable; alignment = <0 0x400000>; - size = <0 0x5c00000>; + size = <0 0x8c00000>; }; cont_splash_memory: cont_splash_region@9c000000 { @@ -686,6 +687,33 @@ interrupt-controller; }; + qcom,memshare { + compatible = "qcom,memshare"; + + qcom,client_1 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + qcom,client-id = <0>; + qcom,allocate-boot-time; + label = "modem"; + }; + + qcom,client_2 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + qcom,client-id = <2>; + label = "modem"; + }; + + mem_client_3_size: qcom,client_3 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x500000>; + qcom,client-id = <1>; + qcom,allocate-on-request; + label = "modem"; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts = <1 1 0xf08>, @@ -1379,25 +1407,25 @@ /* No vote */ <78 512 0 0>, <1 606 0 0>, /* 400 KB/s*/ - <78 512 1046 3200>, - <1 606 1600 3200>, + <78 512 1046 1600>, + <1 606 1600 1600>, /* 20 MB/s */ - <78 512 52286 160000>, - <1 606 80000 160000>, + <78 512 52286 80000>, + <1 606 80000 80000>, /* 25 MB/s */ - <78 512 65360 200000>, - <1 606 100000 200000>, + <78 512 65360 100000>, + <1 606 100000 100000>, /* 50 MB/s */ - <78 512 130718 400000>, + <78 512 130718 200000>, <1 606 133320 133320>, /* 100 MB/s */ - <78 512 261438 400000>, - <1 606 150000 300000>, + <78 512 130718 200000>, + <1 606 150000 150000>, /* 200 MB/s */ <78 512 261438 400000>, <1 606 300000 300000>, /* 400 MB/s */ - <78 512 261438 1100000>, + <78 512 261438 400000>, <1 606 300000 300000>, /* Max. 
bandwidth */ <78 512 1338562 4096000>, @@ -1413,8 +1441,11 @@ qcom,pm-qos-legacy-latency-us = <67 67>, <67 67>; clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>, - <&clock_gcc GCC_SDCC1_APPS_CLK>; - clock-names = "iface_clk", "core_clk"; + <&clock_gcc GCC_SDCC1_APPS_CLK>, + <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>; + clock-names = "iface_clk", "core_clk", "ice_core_clk"; + + qcom,ice-clk-rates = <300000000 75000000>; qcom,nonremovable; status = "disabled"; @@ -1519,6 +1550,29 @@ clock-names = "iface_clk"; }; + ufs_ice: ufsice@1d90000 { + compatible = "qcom,ice"; + reg = <0x1d90000 0x8000>; + qcom,enable-ice-clk; + clock-names = "ufs_core_clk", "bus_clk", + "iface_clk", "ice_core_clk"; + clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>, + <&clock_gcc GCC_UFS_PHY_AHB_CLK>, + <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>; + qcom,op-freq-hz = <0>, <0>, <0>, <300000000>; + vdd-hba-supply = <&ufs_phy_gdsc>; + qcom,msm-bus,name = "ufs_ice_noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 650 0 0>, /* No vote */ + <1 650 1000 0>; /* Max. 
bandwidth */ + qcom,bus-vector-names = "MIN", + "MAX"; + qcom,instance-type = "ufs"; + }; + ufsphy_mem: ufsphy_mem@1d87000 { reg = <0x1d87000 0xdb8>; /* PHY regs */ reg-names = "phy_mem"; @@ -1542,6 +1596,7 @@ interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <1>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ @@ -2037,6 +2092,21 @@ qcom,glink-channels = "g_glink_audio_data"; qcom,intents = <0x1000 2>; }; + + qcom,diag_data { + qcom,glink-channels = "DIAG_DATA"; + qcom,intents = <0x4000 2>; + }; + + qcom,diag_ctrl { + qcom,glink-channels = "DIAG_CTRL"; + qcom,intents = <0x4000 1>; + }; + + qcom,diag_cmd { + qcom,glink-channels = "DIAG_CMD"; + qcom,intents = <0x4000 1 >; + }; }; }; @@ -2097,6 +2167,9 @@ qcom,smp2p_sleepstate { compatible = "qcom,smp2p-sleepstate"; qcom,smem-states = <&sleepstate_smp2p_out 0>; + interrupt-parent = <&sleepstate_smp2p_in>; + interrupts = <0 0>; + interrupt-names = "smp2p-sleepstate-in"; }; qcom,smp2p-modem { @@ -2129,6 +2202,12 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_wlan_1_in: qcom,smp2p-wlan-1-in { + qcom,entry-name = "wlan"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; qcom,smp2p-adsp { @@ -2154,6 +2233,12 @@ qcom,entry-name = "sleepstate"; #qcom,smem-state-cells = <1>; }; + + sleepstate_smp2p_in: qcom,sleepstate-in { + qcom,entry-name = "sleepstate_see"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; qcom,smp2p-cdsp { @@ -2354,7 +2439,7 @@ qcom,rmnet-ipa { compatible = "qcom,rmnet-ipa3"; qcom,rmnet-ipa-ssr; - qcom,ipa-loaduC; + qcom,ipa-platform-type-msm; qcom,ipa-advertise-sg-support; qcom,ipa-napi-enable; }; @@ -2481,11 +2566,18 @@ qcom,smmu-s1-bypass; qcom,wlan-msa-memory = <0x100000>; qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; - vdd-0.8-cx-mx-supply = <&pm6150_l9>; + vdd-cx-mx-supply = <&pm6150_l9>; vdd-1.8-xo-supply = <&pm6150l_l1>; vdd-1.3-rfa-supply = <&pm6150l_l2>; vdd-3.3-ch0-supply = <&pm6150l_l10>; - 
qcom,vdd-0.8-cx-mx-config = <640000 640000>; + qcom,vdd-cx-mx-config = <640000 640000>; + qcom,smp2p_map_wlan_1_in { + interrupts-extended = <&smp2p_wlan_1_in 0 0>, + <&smp2p_wlan_1_in 1 0>; + interrupt-names = "qcom,smp2p-force-fatal-error", + "qcom,smp2p-early-crash-ind"; + }; + }; qcom,rmtfs_sharedmem@0 { @@ -2493,7 +2585,6 @@ reg = <0x0 0x200000>; reg-names = "rmtfs"; qcom,client-id = <0x00000001>; - qcom,guard-memory; }; llcc_pmu: llcc-pmu@90cc000 { @@ -2599,6 +2690,7 @@ qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; qcom,target-dev = <&cpu0_cpu_l3_lat>; qcom,cachemiss-ev = <0x17>; + qcom,stall-cycle-ev = <0xE7>; qcom,core-dev-table = < 576000 300000000 >, < 1017600 556800000 >, @@ -2619,6 +2711,7 @@ qcom,cpulist = <&CPU6 &CPU7>; qcom,target-dev = <&cpu6_cpu_l3_lat>; qcom,cachemiss-ev = <0x17>; + qcom,stall-cycle-ev = <0x15E>; qcom,core-dev-table = < 1017600 556800000 >, < 1209600 806400000 >, @@ -2641,6 +2734,7 @@ qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; qcom,target-dev = <&cpu0_cpu_llcc_lat>; qcom,cachemiss-ev = <0x2A>; + qcom,stall-cycle-ev = <0xE7>; qcom,core-dev-table = < 748000 MHZ_TO_MBPS(150, 16) >, < 1209600 MHZ_TO_MBPS(300, 16) >, @@ -2662,6 +2756,7 @@ qcom,cpulist = <&CPU6 &CPU7>; qcom,target-dev = <&cpu6_cpu_llcc_lat>; qcom,cachemiss-ev = <0x2A>; + qcom,stall-cycle-ev = <0x15E>; qcom,core-dev-table = < 768000 MHZ_TO_MBPS(300, 16) >, < 1017600 MHZ_TO_MBPS(466, 16) >, @@ -2684,6 +2779,7 @@ qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>; qcom,target-dev = <&cpu0_llcc_ddr_lat>; qcom,cachemiss-ev = <0x1000>; + qcom,stall-cycle-ev = <0xE7>; qcom,core-dev-table = < 748000 MHZ_TO_MBPS( 300, 4) >, < 1017600 MHZ_TO_MBPS( 451, 4) >, @@ -2706,6 +2802,7 @@ qcom,cpulist = <&CPU6 &CPU7>; qcom,target-dev = <&cpu6_llcc_ddr_lat>; qcom,cachemiss-ev = <0x1000>; + qcom,stall-cycle-ev = <0x15E>; qcom,core-dev-table = < 768000 MHZ_TO_MBPS( 451, 4) >, < 1017600 MHZ_TO_MBPS( 547, 4) >, @@ -2767,6 +2864,15 @@ qcom,msm-bus,active-only; status = 
"ok"; }; + + cx_ipeak_lm: cx_ipeak@01fed000 { + compatible = "qcom,cx-ipeak-sm6150"; + reg = <0x1fed000 0x28>; + }; + + demux { + compatible = "qcom,demux"; + }; }; #include "pm6150.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts index a5423f1ef523a83604105c8b9fce4010be6dcb93..7a0949e3f0bc79dff0a962625f7234df441a5f8d 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-idp-overlay.dts @@ -19,7 +19,7 @@ #include "sm6150-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150P IDP"; + model = "IDP"; compatible = "qcom,sm6150p-idp", "qcom,sm6150p", "qcom,idp"; qcom,msm-id = <369 0x0>; qcom,board-id = <34 0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150p-idp.dts b/arch/arm64/boot/dts/qcom/sm6150p-idp.dts index 1344a61a9bcc37608903ae4196def7ee5d090fbd..142a15bb6284b642a12a3a55f32d6dd3133ebcde 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-idp.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-idp.dts @@ -16,7 +16,7 @@ #include "sm6150-idp.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150P IDP"; + model = "Qualcomm Technologies, Inc. SM6150P PM6150 IDP"; compatible = "qcom,sm6150p-idp", "qcom,sm6150p", "qcom,idp"; qcom,board-id = <34 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts index b08051cdc0c1bfe95fa4ade86a029a4cee0d6c7a..70dd2979a05f81292a232b3343959976cb81989a 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-qrd-overlay.dts @@ -19,7 +19,7 @@ #include "sm6150-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. 
SM6150P QRD"; + model = "QRD"; compatible = "qcom,sm6150p-qrd", "qcom,sm6150p", "qcom,qrd"; qcom,board-id = <11 0>; qcom,msm-id = <369 0>; diff --git a/arch/arm64/boot/dts/qcom/sm6150p-qrd.dts b/arch/arm64/boot/dts/qcom/sm6150p-qrd.dts index fed875562496bcc4135f2dac07a2d7f1e1e1d4d8..7778ec4faddba6144df54182a247665c97d77972 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p-qrd.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p-qrd.dts @@ -16,7 +16,7 @@ #include "sm6150-qrd.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150P QRD"; + model = "Qualcomm Technologies, Inc. SM6150P PM6150 QRD"; compatible = "qcom,sm6150p-qrd", "qcom,sm6150p", "qcom,qrd"; qcom,board-id = <11 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150p.dts b/arch/arm64/boot/dts/qcom/sm6150p.dts index 88bd794569fda6a315a40249f9df22316d58ea58..9f3e09cd2941a9b46520960ee6dd0728ddbc8756 100644 --- a/arch/arm64/boot/dts/qcom/sm6150p.dts +++ b/arch/arm64/boot/dts/qcom/sm6150p.dts @@ -17,5 +17,6 @@ / { model = "Qualcomm Technologies, Inc. 
SM6150P SoC"; compatible = "qcom,sm6150p"; + qcom,pmic-name = "PM6150"; qcom,board-id = <0 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi index c8dafeeb7bd4548e888093c8de262f9a13b1ae11..4797ab43589792b4f906986e12e630a216344b74 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-camera.dtsi @@ -534,7 +534,7 @@ "svs_l1", "nominal", "turbo"; control-camnoc-axi-clk; camnoc-bus-width = <32>; - camnoc-axi-clk-bw-margin-perc = <10>; + camnoc-axi-clk-bw-margin-perc = <20>; qcom,msm-bus,name = "cam_ahb"; qcom,msm-bus,num-cases = <7>; qcom,msm-bus,num-paths = <1>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..871ddc3f53ac90887f6a783e6c9540f95ab1c90e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu-v2.dtsi @@ -0,0 +1,37 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + gpu_opp_table_v2: gpu_opp_table_v2 { + compatible = "operating-points-v2"; + + opp-585000000 { + opp-hz = /bits/ 64 <585000000>; + opp-microvolt = ; + }; + + opp-427000000 { + opp-hz = /bits/ 64 <427000000>; + opp-microvolt = ; + }; + + opp-345000000 { + opp-hz = /bits/ 64 <345000000>; + opp-microvolt = ; + }; + + opp-257000000 { + opp-hz = /bits/ 64 <257000000>; + opp-microvolt = ; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi index 0b10cbcb1089bc20ca227f2dea7bd84aeb622bf6..4ef8eb9f00e573c3d194ca41d697c0c9ba958691 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi @@ -216,7 +216,6 @@ qcom,bus-freq = <12>; qcom,bus-min = <10>; qcom,bus-max = <12>; - qcom,dvm-val = <0xffffffff>; }; qcom,gpu-pwrlevel@1 { @@ -225,7 +224,6 @@ qcom,bus-freq = <10>; qcom,bus-min = <9>; qcom,bus-max = <11>; - qcom,dvm-val = <0xffffffff>; }; @@ -235,7 +233,6 @@ qcom,bus-freq = <9>; qcom,bus-min = <8>; qcom,bus-max = <10>; - qcom,dvm-val = <0xffffffff>; }; qcom,gpu-pwrlevel@3 { @@ -244,7 +241,6 @@ qcom,bus-freq = <8>; qcom,bus-min = <7>; qcom,bus-max = <9>; - qcom,dvm-val = <0xffffffff>; }; @@ -254,7 +250,6 @@ qcom,bus-freq = <5>; qcom,bus-min = <5>; qcom,bus-max = <7>; - qcom,dvm-val = <0xffffffff>; }; qcom,gpu-pwrlevel@5 { @@ -263,7 +258,6 @@ qcom,bus-freq = <4>; qcom,bus-min = <3>; qcom,bus-max = <5>; - qcom,dvm-val = <0xffffffff>; }; qcom,gpu-pwrlevel@6 { @@ -272,7 +266,6 @@ qcom,bus-freq = <0>; qcom,bus-min = <0>; qcom,bus-max = <0>; - qcom,dvm-val = <0xffffffff>; }; }; }; @@ -312,8 +305,12 @@ label = "kgsl-gmu"; compatible = "qcom,gpu-gmu"; - reg = <0x2c6a000 0x30000>, <0xb200000 0x300000>; - reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg"; + reg = <0x2c6a000 0x30000>, + <0xb280000 0x10000>, + <0xb480000 0x10000>; + reg-names = "kgsl_gmu_reg", + "kgsl_gmu_pdc_cfg", + "kgsl_gmu_pdc_seq"; interrupts = <0 304 0>, <0 305 0>; interrupt-names = "kgsl_hfi_irq", 
"kgsl_gmu_irq"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi index e203fb0a1e753dc2fcad342bbca1f1009da2bdda..2f7244fe585dc0f8dcf041ce971368c862e27f81 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi @@ -628,6 +628,7 @@ qcom,battery-data = <&mtp_batterydata>; qcom,step-charging-enable; qcom,sw-jeita-enable; + qcom,wd-bark-time-secs = <16>; }; &smb1390 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi index a06d5305bd8dd2cbfd8080bbe4ffa60c36957995..83c8f51629542c6479e66eaeb17dce863884b042 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi @@ -67,8 +67,8 @@ vdd_cx-supply = <&VDD_CX_LEVEL>; qcom,proxy-reg-names ="vdd", "vdd_cx"; qcom,vdd_cx-uV-uA = ; - mboxes = <&qmp_npu0 0>, <&qmp_npu1 0>; - mbox-names = "npu_low", "npu_high"; + mboxes = <&qmp_aop 0>; + mbox-names = "aop"; #cooling-cells = <2>; qcom,npubw-dev = <&npu_npu_ddr_bw>; qcom,npu-pwrlevels { @@ -78,29 +78,6 @@ initial-pwrlevel = <4>; qcom,npu-pwrlevel@0 { reg = <0>; - clk-freq = <9600000 - 19200000 - 19200000 - 19200000 - 19200000 - 9600000 - 60000000 - 19200000 - 19200000 - 30000000 - 19200000 - 19200000 - 19200000 - 19200000 - 9600000 - 19200000 - 0 - 0 - 0 - 0>; - }; - qcom,npu-pwrlevel@1 { - reg = <1>; clk-freq = <300000000 19200000 100000000 @@ -122,8 +99,8 @@ 0 0>; }; - qcom,npu-pwrlevel@2 { - reg = <2>; + qcom,npu-pwrlevel@1 { + reg = <1>; clk-freq = <350000000 19200000 150000000 @@ -145,8 +122,8 @@ 0 0>; }; - qcom,npu-pwrlevel@3 { - reg = <3>; + qcom,npu-pwrlevel@2 { + reg = <2>; clk-freq = <400000000 19200000 200000000 @@ -168,8 +145,8 @@ 0 0>; }; - qcom,npu-pwrlevel@4 { - reg = <4>; + qcom,npu-pwrlevel@3 { + reg = <3>; clk-freq = <600000000 19200000 300000000 @@ -191,8 +168,8 @@ 0 0>; }; - qcom,npu-pwrlevel@5 { - reg = <5>; + qcom,npu-pwrlevel@4 { + reg = <4>; clk-freq = <715000000 19200000 350000000 
diff --git a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi index b4130aad5a2bcc0ade5b2bc5e2d3284d1f58d003..3c48494c30d2f4bbce08a9a90eeea0075a8c0e45 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pcie.dtsi @@ -134,19 +134,17 @@ 0x0034 0x06 0x0 0x029c 0x12 0x0 0x0284 0x05 0x0 - 0x051c 0x03 0x0 + 0x0c38 0x03 0x0 0x0518 0x1c 0x0 0x0524 0x14 0x0 - 0x04e8 0x07 0x0 - 0x04ec 0x6e 0x0 - 0x04f0 0x6e 0x0 - 0x04f4 0x4a 0x0 + 0x04e8 0x00 0x0 + 0x04ec 0x0e 0x0 + 0x04f0 0x4a 0x0 + 0x04f4 0x0f 0x0 0x05b4 0x04 0x0 0x0434 0x7f 0x0 0x0444 0x70 0x0 0x0510 0x17 0x0 - 0x04d4 0x54 0x0 - 0x04d8 0x37 0x0 0x0598 0xd4 0x0 0x059c 0x54 0x0 0x05a0 0xdb 0x0 @@ -156,7 +154,7 @@ 0x0588 0xe4 0x0 0x058c 0xec 0x0 0x0590 0x39 0x0 - 0x0594 0x36 0x0 + 0x0594 0x37 0x0 0x0570 0x7f 0x0 0x0574 0xff 0x0 0x0578 0xff 0x0 @@ -164,25 +162,31 @@ 0x0580 0x75 0x0 0x04fc 0x00 0x0 0x04f8 0xc0 0x0 - 0x0460 0xa0 0x0 - 0x0464 0xc0 0x0 - 0x05bc 0x0c 0x0 - 0x04dc 0x05 0x0 - 0x0408 0x0c 0x0 - 0x0414 0x03 0x0 + 0x0414 0x04 0x0 0x09a4 0x01 0x0 0x0c90 0x00 0x0 0x0c40 0x01 0x0 0x0c48 0x01 0x0 0x0c50 0x00 0x0 - 0x0cbc 0x00 0x0 - 0x0ce0 0x58 0x0 0x0048 0x90 0x0 0x0c1c 0xc1 0x0 - 0x0988 0xaa 0x0 - 0x0998 0x0b 0x0 + 0x0988 0x66 0x0 + 0x0998 0x08 0x0 0x08dc 0x0d 0x0 0x09ec 0x01 0x0 + 0x04b4 0x02 0x0 + 0x04b8 0x02 0x0 + 0x04bc 0xaa 0x0 + 0x04c0 0x00 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x07 0x0 + 0x0460 0xa0 0x0 + 0x05c4 0x0c 0x0 + 0x0464 0x00 0x0 + 0x05c0 0x10 0x0 + 0x04dc 0x05 0x0 + 0x0408 0x0c 0x0 + 0x0414 0x03 0x0 0x0800 0x00 0x0 0x0844 0x03 0x0>; @@ -369,6 +373,7 @@ 0x0030 0x4c 0x0 0x0034 0x06 0x0 0x0048 0x90 0x0 + 0x0050 0x07 0x0 0x0058 0x0f 0x0 0x0074 0x06 0x0 0x0078 0x06 0x0 @@ -394,29 +399,25 @@ 0x0110 0x24 0x0 0x0118 0xb4 0x0 0x011c 0x03 0x0 - 0x0154 0x34 0x0 + 0x0154 0x32 0x0 0x0158 0x01 0x0 0x016c 0x08 0x0 0x01ac 0xb9 0x0 0x01b0 0x1e 0x0 0x01b4 0x94 0x0 0x01b8 0x18 0x0 - 0x01bc 0x11 0x0 + 0x01bc 0x01 0x0 0x0284 0x05 0x0 0x029c 0x12 0x0 0x0408 
0x0c 0x0 0x0414 0x03 0x0 0x0434 0x7f 0x0 0x0444 0x70 0x0 - 0x0460 0xa0 0x0 - 0x0464 0xc0 0x0 - 0x04d4 0x54 0x0 - 0x04d8 0x37 0x0 - 0x04dc 0x05 0x0 - 0x04e8 0x07 0x0 - 0x04ec 0x6e 0x0 - 0x04f0 0x6e 0x0 - 0x04f4 0x4a 0x0 + 0x04d8 0x01 0x0 + 0x04e8 0x00 0x0 + 0x04ec 0x0e 0x0 + 0x04f0 0x4a 0x0 + 0x04f4 0x0f 0x0 0x04f8 0xc0 0x0 0x04fc 0x00 0x0 0x0510 0x17 0x0 @@ -438,22 +439,29 @@ 0x05a0 0xdb 0x0 0x05a4 0x39 0x0 0x05a8 0x31 0x0 - 0x05bc 0x0c 0x0 + 0x05b4 0x04 0x0 + 0x04b4 0x02 0x0 + 0x04b8 0x02 0x0 + 0x04bc 0xaa 0x0 + 0x04c0 0x00 0x0 + 0x04d4 0x54 0x0 + 0x04d8 0x07 0x0 + 0x0460 0xa0 0x0 + 0x05c4 0x0c 0x0 + 0x0464 0x00 0x0 + 0x05c0 0x10 0x0 + 0x04dc 0x05 0x0 0x0684 0x05 0x0 0x069c 0x12 0x0 0x0808 0x0c 0x0 0x0814 0x03 0x0 0x0834 0x7f 0x0 0x0844 0x70 0x0 - 0x0860 0xa0 0x0 - 0x0864 0xc0 0x0 - 0x08d4 0x54 0x0 - 0x08d8 0x37 0x0 - 0x08dc 0x05 0x0 - 0x08e8 0x07 0x0 - 0x08ec 0x6e 0x0 - 0x08f0 0x6e 0x0 - 0x08f4 0x4a 0x0 + 0x08d8 0x01 0x0 + 0x08e8 0x00 0x0 + 0x08ec 0x0e 0x0 + 0x08f0 0x4a 0x0 + 0x08f4 0x0f 0x0 0x08f8 0xc0 0x0 0x08fc 0x00 0x0 0x0910 0x17 0x0 @@ -468,27 +476,41 @@ 0x0984 0x24 0x0 0x0988 0xe4 0x0 0x098c 0xec 0x0 - 0x0990 0x39 0x0 + 0x0990 0x3a 0x0 0x0994 0x36 0x0 0x0998 0xd4 0x0 0x099c 0x54 0x0 0x09a0 0xdb 0x0 0x09a4 0x39 0x0 0x09a8 0x31 0x0 - 0x09bc 0x0c 0x0 - 0x0adc 0x05 0x0 - 0x0b88 0xaa 0x0 - 0x0b98 0x0b 0x0 + 0x09b4 0x04 0x0 + 0x08b4 0x02 0x0 + 0x08b8 0x02 0x0 + 0x08bc 0xaa 0x0 + 0x08c0 0x00 0x0 + 0x08d4 0x54 0x0 + 0x08d8 0x07 0x0 + 0x0860 0xa0 0x0 + 0x09c4 0x0c 0x0 + 0x0864 0x00 0x0 + 0x09c0 0x10 0x0 + 0x08dc 0x05 0x0 + 0x0a98 0x01 0x0 + 0x0abc 0x56 0x0 + 0x0adc 0x0d 0x0 + 0x0b88 0x66 0x0 0x0ba4 0x01 0x0 - 0x0bec 0x01 0x0 - 0x0e0c 0x0d 0x0 + 0x0b98 0x08 0x0 0x0e14 0x07 0x0 0x0e1c 0xc1 0x0 0x0e40 0x01 0x0 0x0e48 0x01 0x0 + 0x0e78 0x50 0x0 0x0e90 0x00 0x0 - 0x0ebc 0x00 0x0 - 0x0ee0 0x58 0x0 + 0x0ea0 0x11 0x0 + 0x0e38 0x03 0x0 + 0x0e50 0x00 0x0 + 0x0e20 0x01 0x0 0x0a00 0x00 0x0 0x0a44 0x03 0x0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi 
b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index 8a689c7dcd230b921533950fab20319e1a719622..eb833f080f75de4d7439da257b6813451d7692af 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -246,39 +246,48 @@ config { pins = "gpio85", "gpio86"; drive-strength = <2>; - bias-disable; + bias-pull-down; }; }; }; qupv3_se13_4uart_pins: qupv3_se13_4uart_pins { - qupv3_se13_4uart_active: qupv3_se13_4uart_active { + qupv3_se13_ctsrx: qupv3_se13_ctsrx { mux { - pins = "gpio43", "gpio44", "gpio45", - "gpio46"; + pins = "gpio43", "gpio46"; function = "qup13"; }; config { - pins = "gpio43", "gpio44", "gpio45", - "gpio46"; + pins = "gpio43", "gpio46"; drive-strength = <2>; bias-disable; }; }; - qupv3_se13_4uart_sleep: qupv3_se13_4uart_sleep { + qupv3_se13_rts: qupv3_se13_rts { mux { - pins = "gpio43", "gpio44", "gpio45", - "gpio46"; - function = "gpio"; + pins = "gpio44"; + function = "qup13"; }; config { - pins = "gpio43", "gpio44", "gpio45", - "gpio46"; + pins = "gpio44"; drive-strength = <2>; - bias-disable; + bias-pull-down; + }; + }; + + qupv3_se13_tx: qupv3_se13_tx { + mux { + pins = "gpio45"; + function = "qup13"; + }; + + config { + pins = "gpio45"; + drive-strength = <2>; + bias-pull-up; }; }; }; @@ -4272,6 +4281,40 @@ drive-strength = <2>; }; }; + emac_phy_intr: emac_phy_intr { + mux { + pins = "gpio124"; + function = "emac_phy"; + }; + config { + pins = "gpio124"; + bias-disable; /* NO pull */ + drive-strength = <8>; + }; + }; + emac_phy_reset_state: emac_phy_reset_state { + mux { + pins = "gpio79"; + function = "gpio"; + }; + config { + pins = "gpio79"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_pin_pps_0: emac_pin_pps_0 { + mux { + pins = "gpio81"; + function = "emac_pps"; + }; + + config { + pins = "gpio81"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL*/ + }; + }; }; bt_en_active: bt_en_active { @@ -4286,5 +4329,31 @@ bias-pull-up; }; }; + + conn_power_1p8_active: 
conn_power_1p8_active { + mux { + pins = "gpio173"; + function = "gpio"; + }; + + config { + pins = "gpio173"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + conn_power_pa_active: conn_power_pa_active { + mux { + pins = "gpio174"; + function = "gpio"; + }; + + config { + pins = "gpio174"; + drive-strength = <2>; + bias-pull-up; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi index 31dcce67f48aa07beb59243992439c6ec44ffb89..f8555b6e07a9c053edb5770d2edc80ac0007cd70 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi @@ -70,8 +70,8 @@ reg = <1>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,entry-latency-us = <360>; - qcom,exit-latency-us = <531>; + qcom,entry-latency-us = <355>; + qcom,exit-latency-us = <909>; qcom,min-residency-us = <3934>; qcom,is-reset; qcom,use-broadcast-timer; @@ -98,8 +98,8 @@ reg = <1>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,entry-latency-us = <702>; - qcom,exit-latency-us = <1061>; + qcom,entry-latency-us = <241>; + qcom,exit-latency-us = <1461>; qcom,min-residency-us = <4488>; qcom,is-reset; qcom,use-broadcast-timer; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi index c1a7aaed9805ee1a3f856666f016dc0fb7df841b..40b8621dcc200b15b5c6b036849a984315892337 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi @@ -79,6 +79,7 @@ }; &pm8150b_charger { + dpdm-supply = <&usb2_phy0>; smb5_vconn: qcom,smb5-vconn { regulator-name = "smb5-vconn"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd-audio-overlay.dtsi index 92bec8c4cd4f91e5ac5f9b6e1e93b76f5bbc5ca7..512fc98ceb2b44be12337446377d037fb1479bb6 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qrd-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd-audio-overlay.dtsi @@ 
-41,6 +41,8 @@ "MIC BIAS2", "Headset Mic", "DMIC0", "MIC BIAS1", "MIC BIAS1", "Digital Mic0", + "DMIC1", "MIC BIAS1", + "MIC BIAS1", "Digital Mic1", "DMIC2", "MIC BIAS3", "MIC BIAS3", "Digital Mic2", "DMIC3", "MIC BIAS3", diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi index e2b84564ac8babe1115eb885ec3e9eb09d16ca0f..cc4217d391633f009c94206b950913de3ea33519 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi @@ -199,7 +199,7 @@ extcon = <&pm8150b_pdphy>, <&pm8150b_pdphy>; eq = /bits/ 8 <0x4 0x4 0x4 0x4>; flat-gain = /bits/ 8 <0x3 0x1 0x1 0x3>; - output-comp = /bits/ 8 <0x2 0x2 0x2 0x2>; + output-comp = /bits/ 8 <0x3 0x3 0x3 0x3>; loss-match = /bits/ 8 <0x1 0x3 0x3 0x1>; }; }; @@ -575,6 +575,7 @@ "chg_temp"; qcom,battery-data = <&qrd_batterydata>; qcom,sw-jeita-enable; + qcom,wd-bark-time-secs = <16>; }; &smb1390 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qupv3.dtsi index 8242bbb8c3f211a8dc5f7fe5672b947bfe40f0b2..4ef1873e2aefc7c34c1858ede4f863241eebff97 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qupv3.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qupv3.dtsi @@ -396,7 +396,7 @@ /* Debug UART Instance for CDP/MTP platform */ qupv3_se12_2uart: qcom,qup_uart@0xa90000 { - compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart"; + compatible = "qcom,msm-geni-console"; reg = <0xa90000 0x4000>; reg-names = "se_phys"; clock-names = "se-clk", "m-ahb", "s-ahb"; @@ -413,7 +413,7 @@ /* 4-wire UART */ qupv3_se13_4uart: qcom,qup_uart@0xc8c000 { - compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart"; + compatible = "qcom,msm-geni-serial-hs"; reg = <0xc8c000 0x4000>; reg-names = "se_phys"; clock-names = "se-clk", "m-ahb", "s-ahb"; @@ -421,8 +421,10 @@ <&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>, <&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>; pinctrl-names = "default", "sleep"; - pinctrl-0 = <&qupv3_se13_4uart_active>; - pinctrl-1 = 
<&qupv3_se13_4uart_sleep>; + pinctrl-0 = <&qupv3_se13_ctsrx>, <&qupv3_se13_rts>, + <&qupv3_se13_tx>; + pinctrl-1 = <&qupv3_se13_ctsrx>, <&qupv3_se13_rts>, + <&qupv3_se13_tx>; interrupts-extended = <&pdc GIC_SPI 585 0>, <&tlmm 46 0>; qcom,wrapper-core = <&qupv3_2>; @@ -951,4 +953,126 @@ dma-names = "tx", "rx"; status = "disabled"; }; + + /* QUPv3 SSC Instances */ + qupv3_3: qcom,qupv3_3_geni_se@26c0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0x26c0000 0x6000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_3_geni_se_cb: qcom,iommu_qupv3_3_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus = <&apps_smmu 0x4e3 0x0>; + }; + }; + + /* I2C */ + qupv3_se20_i2c: i2c@2680000 { + compatible = "qcom,i2c-geni"; + reg = <0x2680000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE0_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se20_i2c_active>; + pinctrl-1 = <&qupv3_se20_i2c_sleep>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; + + qupv3_se21_i2c: i2c@2684000 { + compatible = "qcom,i2c-geni"; + reg = <0x2684000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE1_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se21_i2c_active>; + pinctrl-1 = <&qupv3_se21_i2c_sleep>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; + + qupv3_se22_i2c: i2c@2688000 { + compatible = "qcom,i2c-geni"; + reg = <0x2688000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE2_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc 
SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se22_i2c_active>; + pinctrl-1 = <&qupv3_se22_i2c_sleep>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; + + qupv3_se23_i2c: i2c@268c000 { + compatible = "qcom,i2c-geni"; + reg = <0x268c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE3_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se23_i2c_active>; + pinctrl-1 = <&qupv3_se23_i2c_sleep>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; + + /* SPI */ + qupv3_se21_spi: spi@2684000 { + compatible = "qcom,spi-geni"; + reg = <0x2684000 0x4000>; + reg-names = "se_phys"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE1_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se21_spi_active>; + pinctrl-1 = <&qupv3_se21_spi_sleep>; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; + + qupv3_se22_spi: spi@2688000 { + compatible = "qcom,spi-geni"; + reg = <0x2688000 0x4000>; + reg-names = "se_phys"; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_scc SCC_QUPV3_SE2_CLK>, + <&clock_scc SCC_QUPV3_M_HCLK_CLK>, + <&clock_scc SCC_QUPV3_S_HCLK_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se22_spi_active>; + pinctrl-1 = <&qupv3_se22_spi_sleep>; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_3>; + status = "disabled"; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi index 
8c7e14e42b7fb1664531d2f175da4ca7af4c509f..1821370d47a80993658bd876dc83fcde63898560 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-slpi-pinctrl.dtsi @@ -16,5 +16,183 @@ reg = <0x2B40000 0x20000>; qcom,num-pins = <14>; status = "disabled"; + + qupv3_se20_i2c_pins: qupv3_se20_i2c_pins { + qupv3_se20_i2c_active: qupv3_se20_i2c_active { + mux { + pins = "gpio0", "gpio1"; + function = "func1"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se20_i2c_sleep: qupv3_se20_i2c_sleep { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se21_i2c_pins: qupv3_se21_i2c_pins { + qupv3_se21_i2c_active: qupv3_se21_i2c_active { + mux { + pins = "gpi2", "gpio3"; + function = "func1"; + }; + + config { + pins = "gpio2", "gpio3"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se21_i2c_sleep: qupv3_se21_i2c_sleep { + mux { + pins = "gpio2", "gpio3"; + function = "gpio"; + }; + + config { + pins = "gpio2", "gpio3"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se22_i2c_pins: qupv3_se22_i2c_pins { + qupv3_se22_i2c_active: qupv3_se22_i2c_active { + mux { + pins = "gpio6", "gpio7"; + function = "func1"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se22_i2c_sleep: qupv3_se22_i2c_sleep { + mux { + pins = "gpio6", "gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se23_i2c_pins: qupv3_se23_i2c_pins { + qupv3_se23_i2c_active: qupv3_se23_i2c_active { + mux { + pins = "gpio8", "gpio9"; + function = "func3"; + }; + + config { + pins = "gpio8", "gpio9"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se23_i2c_sleep: qupv3_se23_i2c_sleep { + mux { + pins = "gpio8", "gpio9"; + function = 
"gpio"; + }; + + config { + pins = "gpio8", "gpio9"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + /* SE21 pin mappings */ + qupv3_se21_spi_pins: qupv3_se21_spi_pins { + qupv3_se21_spi_active: qupv3_se21_spi_active { + mux { + pins = "gpio2", "gpio3", "gpio4", + "gpio5"; + function = "func1"; + }; + + config { + pins = "gpio2", "gpio3", "gpio4", + "gpio5"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se21_spi_sleep: qupv3_se21_spi_sleep { + mux { + pins = "gpio2", "gpio3", "gpio4", + "gpio5"; + function = "gpio"; + }; + + config { + pins = "gpio2", "gpio3", "gpio4", + "gpio5"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /*SE22 pin mappings*/ + qupv3_se22_spi_pins: qupv3_se22_spi_pins { + qupv3_se22_spi_active: qupv3_se22_spi_active { + mux { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + function = "func1"; + }; + + config { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se22_spi_sleep: qupv3_se22_spi_sleep { + mux { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + function = "gpio"; + }; + + config { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + drive-strength = <6>; + bias-disable; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi index 829c52d90733d2912b0b61fccea792cf5d57f551..50cf62763f2ca67c8660b9bb9c3f79333629955f 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi @@ -44,6 +44,13 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_wlan_1_in: qcom,smp2p-wlan-1-in { + qcom,entry-name = "wlan"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; qcom,smp2p-adsp@1799000c { @@ -144,15 +151,4 @@ }; }; - /* wlan - inbound entry from mss/WLAN PD */ - smp2pgpio_wlan_1_in: qcom,smp2pgpio-wlan-1-in { - compatible = "qcom,smp2pgpio"; - qcom,entry-name = "wlan"; - qcom,remote-pid = <1>; - qcom,is-inbound; - gpio-controller; 
- #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi index 70ab9971e8c378e55150320c54c9d5b8ecf93641..c050284d4c4e1446947e46050c06d40dc5c31024 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi @@ -554,45 +554,17 @@ >; }; }; -&gpu_opp_table { - compatible = "operating-points-v2"; - opp-700000000 { - opp-hz = /bits/ 64 <585000000>; - opp-microvolt = ; - }; - - opp-675000000 { - opp-hz = /bits/ 64 <585000000>; - opp-microvolt = ; - }; - - opp-585000000 { - opp-hz = /bits/ 64 <585000000>; - opp-microvolt = ; - }; - - opp-427000000 { - opp-hz = /bits/ 64 <427000000>; - opp-microvolt = ; - }; - - opp-345000000 { - opp-hz = /bits/ 64 <345000000>; - opp-microvolt = ; - }; - - opp-257000000 { - opp-hz = /bits/ 64 <257000000>; - opp-microvolt = ; - }; -}; +#include "sm8150-gpu-v2.dtsi" /* GPU overrides */ &msm_gpu { /* Updated chip ID */ qcom,chipid = <0x06040001>; + /* Power level to start throttling */ + qcom,throttle-pwrlevel = <2>; + /* Updated Bus Scale Settings */ qcom,msm-bus,num-cases = <12>; @@ -616,6 +588,10 @@ <26 512 0 7211000>, // 10 bus=1804 <26 512 0 8363000>; // 11 bus=2092 + qcom,initial-pwrlevel = <3>; + + operating-points-v2 = <&gpu_opp_table_v2>; + qcom,gpu-pwrlevels { #address-cells = <1>; #size-cells = <0>; @@ -632,51 +608,37 @@ qcom,gpu-pwrlevel@1 { reg = <1>; - qcom,gpu-freq = <585000000>; - qcom,bus-freq = <7>; - qcom,bus-min = <6>; - qcom,bus-max = <11>; - }; - - qcom,gpu-pwrlevel@2 { - reg = <2>; - qcom,gpu-freq = <585000000>; - qcom,bus-freq = <7>; - qcom,bus-min = <6>; - qcom,bus-max = <11>; - }; - - qcom,gpu-pwrlevel@3 { - reg = <3>; qcom,gpu-freq = <427000000>; qcom,bus-freq = <6>; qcom,bus-min = <5>; - qcom,bus-max = <9>; + qcom,bus-max = <7>; }; - qcom,gpu-pwrlevel@4 { - reg = <4>; + qcom,gpu-pwrlevel@2 { + reg = <2>; qcom,gpu-freq = <345000000>; qcom,bus-freq = <3>; 
qcom,bus-min = <3>; - qcom,bus-max = <8>; + qcom,bus-max = <5>; }; - qcom,gpu-pwrlevel@5 { - reg = <5>; + qcom,gpu-pwrlevel@3 { + reg = <3>; qcom,gpu-freq = <257000000>; - qcom,bus-freq = <2>; - qcom,bus-min = <1>; - qcom,bus-max = <8>; + qcom,bus-freq = <3>; + qcom,bus-min = <2>; + qcom,bus-max = <4>; }; - qcom,gpu-pwrlevel@6 { - reg = <6>; + qcom,gpu-pwrlevel@4 { + reg = <4>; qcom,gpu-freq = <0>; qcom,bus-freq = <0>; qcom,bus-min = <0>; qcom,bus-max = <0>; }; + /delete-node/ qcom,gpu-pwrlevel@5; + /delete-node/ qcom,gpu-pwrlevel@6; }; qcom,l3-pwrlevels { @@ -702,6 +664,15 @@ }; }; +&gmu { + reg = <0x2c6a000 0x30000>, + <0xb290000 0x10000>, + <0xb490000 0x10000>; + reg-names = "kgsl_gmu_reg", + "kgsl_gmu_pdc_cfg", + "kgsl_gmu_pdc_seq"; +}; + /* NPU overrides */ &msm_npu { iommus = <&apps_smmu 0x1081 0x400>; @@ -953,6 +924,18 @@ < 2841600 1612000000 >; }; +&cpu7_cpu_l3_latmon { + qcom,core-dev-table = + < 300000 300000000 >, + < 825600 614400000 >, + < 1171200 806400000 >, + < 1401600 998400000 >, + < 1708800 1267200000 >, + < 2016000 1344000000 >, + < 2419200 1536000000 >, + < 2841600 1612000000 >; +}; + &cpu0_cpu_llcc_latmon { qcom,core-dev-table = < 300000 MHZ_TO_MBPS( 150, 16) >, @@ -1115,3 +1098,20 @@ USB3_DP_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x07 0 0xffffffff 0xffffffff 0x00>; }; + +/* qcedev override */ +&qcom_cedev { + qcom_cedev_ns_cb { + iommus = <&apps_smmu 0x512 0>, + <&apps_smmu 0x518 0>, + <&apps_smmu 0x519 0>, + <&apps_smmu 0x51f 0>; + }; + + qcom_cedev_s_cb { + iommus = <&apps_smmu 0x513 0>, + <&apps_smmu 0x51c 0>, + <&apps_smmu 0x51d 0>, + <&apps_smmu 0x51e 0>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 0b077bfa980dd286f8258119ba7b98bc02a41e91..4aeaf5e535383a4d5ee6baa1791d6ed232c77eb7 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -574,6 +574,10 @@ firmware: firmware { android { compatible = "android,firmware"; + vbmeta { + compatible = 
"android,vbmeta"; + parts = "vbmeta,boot,system,vendor,dtbo"; + }; fstab { compatible = "android,fstab"; vendor { @@ -622,67 +626,67 @@ pil_wlan_fw_mem: pil_wlan_fw_region@8bc00000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8bc00000 0x0 0x100000>; + reg = <0x0 0x8bc00000 0x0 0x180000>; }; - pil_ipa_fw_mem: pil_ipa_fw_region@8bd00000 { + pil_npu_mem: pil_npu_region@8bd80000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8bd00000 0x0 0x10000>; + reg = <0x0 0x8bd80000 0x0 0x80000>; }; - pil_ipa_gsi_mem: pil_ipa_gsi_region@8bd10000 { + pil_adsp_mem: pil_adsp_region@8be00000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8bd10000 0x0 0x5000>; + reg = <0x0 0x8be00000 0x0 0x1a00000>; }; - pil_gpu_mem: pil_gpu_region@8bd15000 { + pil_modem_mem: modem_region@8d800000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8bd15000 0x0 0x2000>; + reg = <0x0 0x8d800000 0x0 0x9600000>; }; - pil_npu_mem: pil_npu_region@8bd80000 { + pil_video_mem: pil_video_region@96e00000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8bd80000 0x0 0x80000>; + reg = <0x0 0x96e00000 0x0 0x500000>; }; - pil_adsp_mem: pil_adsp_region@8be00000 { + pil_slpi_mem: pil_slpi_region@97300000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8be00000 0x0 0x1a00000>; + reg = <0x0 0x97300000 0x0 0x1400000>; }; - pil_modem_mem: modem_region@8d800000 { + pil_ipa_fw_mem: pil_ipa_fw_region@98700000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x8d800000 0x0 0x8f00000>; + reg = <0x0 0x98700000 0x0 0x10000>; }; - pil_video_mem: pil_video_region@96700000 { + pil_ipa_gsi_mem: pil_ipa_gsi_region@98710000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x96700000 0x0 0x500000>; + reg = <0x0 0x98710000 0x0 0x5000>; }; - pil_slpi_mem: pil_slpi_region@96c00000 { + pil_gpu_mem: pil_gpu_region@98715000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x96c00000 0x0 0x1400000>; + reg = <0x0 0x98715000 0x0 0x2000>; }; - pil_spss_mem: 
pil_spss_region@98000000 { + pil_spss_mem: pil_spss_region@98800000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x98000000 0x0 0x100000>; + reg = <0x0 0x98800000 0x0 0x100000>; }; - pil_cdsp_mem: cdsp_regions@98100000 { + pil_cdsp_mem: cdsp_regions@98900000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x98100000 0x0 0x1400000>; + reg = <0x0 0x98900000 0x0 0x1400000>; }; qseecom_mem: qseecom_region@0x9e400000 { @@ -911,6 +915,16 @@ interrupt-parent = <&intc>; }; + gict: gict@17a20000 { + compatible = "arm,gic-600-erp"; + reg = <0x17a20000 0x10000>; + reg-names = "gict-base"; + interrupt-config = <46 17>; + interrupt-names = "gict-fault", "gict-err"; + interrupts = , + ; + }; + pdc: interrupt-controller@0xb220000{ compatible = "qcom,pdc-sm8150"; reg = <0xb220000 0x400>; @@ -1130,7 +1144,7 @@ cpu4_cpu_l3_latmon: qcom,cpu4-cpu-l3-latmon { compatible = "qcom,arm-memlat-mon"; - qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>; + qcom,cpulist = <&CPU4 &CPU5 &CPU6>; qcom,target-dev = <&cpu4_cpu_l3_lat>; qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = @@ -1142,6 +1156,27 @@ < 2016000 1344000000 >; }; + cpu7_cpu_l3_lat: qcom,cpu7-cpu-l3-lat { + compatible = "devfreq-simple-dev"; + clock-names = "devfreq_clk"; + clocks = <&clock_cpucc L3_CLUSTER2_VOTE_CLK>; + governor = "performance"; + }; + + cpu7_cpu_l3_latmon: qcom,cpu7-cpu-l3-latmon { + compatible = "qcom,arm-memlat-mon"; + qcom,cpulist = <&CPU7>; + qcom,target-dev = <&cpu7_cpu_l3_lat>; + qcom,cachemiss-ev = <0x17>; + qcom,core-dev-table = + < 300000 300000000 >, + < 768000 576000000 >, + < 1152000 768000000 >, + < 1344000 960000000 >, + < 1689600 1228800000 >, + < 2016000 1344000000 >; + }; + cpu0_cpu_llcc_lat: qcom,cpu0-cpu-llcc-lat { compatible = "qcom,devbw"; governor = "performance"; @@ -1320,6 +1355,12 @@ qcom,rtb-size = <0x100000>; }; + qcom,aop-ddr-msgs { + compatible = "qcom,aop-ddr-msgs"; + mboxes = <&qmp_aop 0>; + mbox-name = "restart-ddr-mbox"; + }; + qcom,mpm2-sleep-counter@0xc221000 { 
compatible = "qcom,mpm2-sleep-counter"; reg = <0xc221000 0x1000>; @@ -1493,7 +1534,7 @@ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base", "osm_perfpcl_base"; l3-devs = <&cpu0_cpu_l3_lat &cpu4_cpu_l3_lat &cdsp_cdsp_l3_lat - &msm_gpu>; + &msm_gpu &cpu7_cpu_l3_lat>; #clock-cells = <1>; }; @@ -2655,6 +2696,7 @@ label = "adsp"; qcom,glink-label = "lpass"; + cpu-affinity = <1 2>; qcom,adsp_qrtr { qcom,glink-channels = "IPCRTR"; @@ -2810,6 +2852,21 @@ qcom,glink-channels = "g_glink_audio_data"; qcom,intents = <0x1000 2>; }; + + qcom,diag_data { + qcom,glink-channels = "DIAG_DATA"; + qcom,intents = <0x4000 2>; + }; + + qcom,diag_ctrl { + qcom,glink-channels = "DIAG_CTRL"; + qcom,intents = <0x4000 1>; + }; + + qcom,diag_cmd { + qcom,glink-channels = "DIAG_CMD"; + qcom,intents = <0x4000 1 >; + }; }; }; @@ -3493,6 +3550,7 @@ ipa_hw: qcom,ipa@1e00000 { compatible = "qcom,ipa"; + mboxes = <&qmp_aop 0>; reg = <0x1e00000 0x34000>, <0x1e04000 0x28000>; reg-names = "ipa-base", "gsi-base"; @@ -3607,8 +3665,10 @@ reg = <0xa0000000 0x10000000>, <0xb0000000 0x10000>; reg-names = "smmu_iova_base", "smmu_iova_ipa"; - pinctrl-names = "disabled"; - pinctrl-0 = <>; + wlan-en-gpio = <&tlmm 169 0>; + pinctrl-names = "wlan_en_active", "wlan_en_sleep"; + pinctrl-0 = <&cnss_wlan_en_active>; + pinctrl-1 = <&cnss_wlan_en_sleep>; qcom,wlan-rc-num = <0>; qcom,wlan-ramdump-dynamic = <0x400000>; @@ -3723,8 +3783,6 @@ <0 425 0 /* CE11 */ >; qcom,wlan-msa-memory = <0x100000>; qcom,wlan-msa-fixed-region = <&pil_wlan_fw_mem>; - qcom,gpio-force-fatal-error = <&smp2pgpio_wlan_1_in 0 0>; - qcom,gpio-early-crash-ind = <&smp2pgpio_wlan_1_in 1 0>; vdd-cx-mx-supply = <&pm8150_l1>; vdd-1.8-xo-supply = <&pm8150_l7>; @@ -3732,6 +3790,12 @@ vdd-3.3-ch0-supply = <&pm8150l_l11>; qcom,vdd-cx-mx-config = <752000 752000>; qcom,vdd-3.3-ch0-config = <3104000 3312000>; + qcom,smp2p_map_wlan_1_in { + interrupts-extended = <&smp2p_wlan_1_in 0 0>, + <&smp2p_wlan_1_in 1 0>; + interrupt-names = 
"qcom,smp2p-force-fatal-error", + "qcom,smp2p-early-crash-ind"; + }; }; wil6210: qcom,wil6210 { diff --git a/arch/arm64/boot/dts/qcom/trinket-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/trinket-pinctrl.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..052cf8e9d8343b299865a0978fb5795b43048dc7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket-pinctrl.dtsi @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +&soc { + tlmm: pinctrl@400000 { + compatible = "qcom,trinket-pinctrl"; + reg = <0x400000 0xc00000>; + interrupts = <0 227 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/trinket-rumi-overlay.dts b/arch/arm64/boot/dts/qcom/trinket-rumi-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..5b34eacac89b5579da858e0b9694b825bfcafdc8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket-rumi-overlay.dts @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include "trinket-rumi.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. TRINKET RUMI"; + compatible = "qcom,trinket-rumi", "qcom,trinket", "qcom,rumi"; + qcom,msm-id = <394 0x0>; + qcom,board-id = <15 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/trinket-rumi.dts b/arch/arm64/boot/dts/qcom/trinket-rumi.dts new file mode 100644 index 0000000000000000000000000000000000000000..fb74e3e1df9cf5452ee9971c4aecdef857592c12 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket-rumi.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/memreserve/ 0x90000000 0x00000100; + +#include "trinket.dtsi" +#include "trinket-rumi.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. TRINKET RUMI"; + compatible = "qcom,trinket-rumi", "qcom,trinket", "qcom,rumi"; + qcom,board-id = <15 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/trinket-rumi.dtsi b/arch/arm64/boot/dts/qcom/trinket-rumi.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..87612484431ef29b7f8d36a3e875eefb0afb4901 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket-rumi.dtsi @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + timer { + clock-frequency = <800000>; + }; + + timer@f120000 { + clock-frequency = <800000>; + }; + + wdog: qcom,wdt@f017000{ + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/trinket.dts b/arch/arm64/boot/dts/qcom/trinket.dts new file mode 100644 index 0000000000000000000000000000000000000000..29ece99394ee58f9f50200eaf3a29dd2817be004 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket.dts @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "trinket.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. TRINKET SoC"; + compatible = "qcom,trinket"; + qcom,board-id = <0 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/trinket.dtsi b/arch/arm64/boot/dts/qcom/trinket.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..b9fa5e00df68bf3a0c6a872f018fff3c8ac7d2f1 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/trinket.dtsi @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2018, The Linux Foundation.All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "skeleton64.dtsi" +#include + +/ { + model = "Qualcomm Technologies, Inc. TRINKET"; + compatible = "qcom,trinket"; + qcom,msm-id = <394 0x0>; + interrupt-parent = <&intc>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + }; + L1_I_0: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_D_0: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_TLB_0: l1-tlb { + qcom,dump-size = <0x2800>; + }; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x1>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + + L1_I_1: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_D_1: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_TLB_1: l1-tlb { + qcom,dump-size = <0x2800>; + }; + }; + + CPU2: cpu@2 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x2>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + + L1_I_2: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_D_2: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_TLB_2: l1-tlb { + qcom,dump-size = <0x2800>; + }; + }; + + CPU3: cpu@3 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x3>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + + L1_I_3: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_D_3: l1-dcache { + 
compatible = "arm,arch-cache"; + qcom,dump-size = <0x9040>; + }; + + L1_TLB_3: l1-tlb { + qcom,dump-size = <0x2800>; + }; + }; + + CPU4: cpu@100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + }; + + L1_I_100: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_D_100: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_TLB_100: l1-tlb { + qcom,dump-size = <0x4800>; + }; + }; + + CPU5: cpu@101 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + + L1_I_101: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_D_101: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_TLB_101: l1-tlb { + qcom,dump-size = <0x4800>; + }; + }; + + CPU6: cpu@102 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x102>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + + L1_I_102: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_D_102: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_TLB_102: l1-tlb { + qcom,dump-size = <0x4800>; + }; + }; + + CPU7: cpu@103 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x103>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + + L1_I_103: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_D_103: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x12000>; + }; + + L1_TLB_103: l1-tlb { + qcom,dump-size = <0x4800>; + }; + }; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + + core1 { + cpu = <&CPU1>; + }; + + core2 { + cpu = <&CPU2>; + }; + + core3 { + cpu = <&CPU3>; + }; + }; + + cluster1 { + core0 { + 
cpu = <&CPU4>; + }; + + core1 { + cpu = <&CPU5>; + }; + + core2 { + cpu = <&CPU6>; + }; + + core3 { + cpu = <&CPU7>; + }; + }; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + chosen { + bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7"; + }; + + soc: soc { }; +}; + +&soc { + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0 0xffffffff>; + compatible = "simple-bus"; + + intc: interrupt-controller@f200000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + interrupt-controller; + #redistributor-regions = <1>; + redistributor-stride = <0x0 0x20000>; + reg = <0xf200000 0x10000>, /* GICD */ + <0xf300000 0x100000>; /* GICR * 8 */ + interrupts = <1 9 4>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 1 0xf08>, + <1 2 0xf08>, + <1 3 0xf08>, + <1 0 0xf08>; + clock-frequency = <19200000>; + }; + + timer@f120000 { + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0xf120000 0x1000>; + clock-frequency = <19200000>; + + frame@f121000 { + frame-number = <0>; + interrupts = <0 8 0x4>, + <0 7 0x4>; + reg = <0xf121000 0x1000>, + <0xf122000 0x1000>; + }; + + frame@f123000 { + frame-number = <1>; + interrupts = <0 9 0x4>; + reg = <0xf123000 0x1000>; + status = "disabled"; + }; + + frame@f124000 { + frame-number = <2>; + interrupts = <0 10 0x4>; + reg = <0xf124000 0x1000>; + status = "disabled"; + }; + + frame@f125000 { + frame-number = <3>; + interrupts = <0 11 0x4>; + reg = <0xf125000 0x1000>; + status = "disabled"; + }; + + frame@f126000 { + frame-number = <4>; + interrupts = <0 12 0x4>; + reg = <0xf126000 0x1000>; + status = "disabled"; + }; + + frame@f127000 { + frame-number = <5>; + interrupts = <0 13 0x4>; + reg = <0xf127000 0x1000>; + status = "disabled"; + }; + + frame@f128000 { + frame-number = <6>; + interrupts = <0 14 0x4>; + reg = <0xf128000 0x1000>; + status = "disabled"; + }; + }; + + qcom,msm-imem@c125000 { + compatible = "qcom,msm-imem"; + reg = 
<0xc125000 0x1000>; + ranges = <0x0 0xc125000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + mem_dump_table@10 { + compatible = "qcom,msm-imem-mem_dump_table"; + reg = <0x10 8>; + }; + + restart_reason@65c { + compatible = "qcom,msm-imem-restart_reason"; + reg = <0x65c 4>; + }; + + dload_type@1c { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x1c 0x4>; + }; + + boot_stats@6b0 { + compatible = "qcom,msm-imem-boot_stats"; + reg = <0x6b0 32>; + }; + + kaslr_offset@6d0 { + compatible = "qcom,msm-imem-kaslr_offset"; + reg = <0x6d0 12>; + }; + + pil@94c { + compatible = "qcom,msm-imem-pil"; + reg = <0x94c 200>; + }; + + diag_dload@c8 { + compatible = "qcom,msm-imem-diag-dload"; + reg = <0xc8 200>; + }; + }; + + restart@440b000 { + compatible = "qcom,pshold"; + reg = <0x440b000 0x4>, + <0x03d3000 0x4>; + reg-names = "pshold-base", "tcsr-boot-misc-detect"; + }; + + qcom,mpm2-sleep-counter@4403000 { + compatible = "qcom,mpm2-sleep-counter"; + reg = <0x4403000 0x1000>; + clock-frequency = <32768>; + }; + + qcom,msm-rtb { + compatible = "qcom,msm-rtb"; + qcom,rtb-size = <0x100000>; + }; + + wdog: qcom,wdt@f017000 { + compatible = "qcom,msm-watchdog"; + reg = <0xf017000 0x1000>; + reg-names = "wdt-base"; + interrupts = <0 3 0>, <0 4 0>; + qcom,bark-time = <11000>; + qcom,pet-time = <9360>; + qcom,ipi-ping; + qcom,wakeup-enable; + qcom,scandump-sizes = <0x40000>; + }; + + cpuss_dump: cpuss_dump { + compatible = "qcom,cpuss-dump"; + qcom,l1_i_cache0 { + qcom,dump-node = <&L1_I_0>; + qcom,dump-id = <0x60>; + }; + qcom,l1_i_cache1 { + qcom,dump-node = <&L1_I_1>; + qcom,dump-id = <0x61>; + }; + qcom,l1_i_cache2 { + qcom,dump-node = <&L1_I_2>; + qcom,dump-id = <0x62>; + }; + qcom,l1_i_cache3 { + qcom,dump-node = <&L1_I_3>; + qcom,dump-id = <0x63>; + }; + qcom,l1_i_cache100 { + qcom,dump-node = <&L1_I_100>; + qcom,dump-id = <0x64>; + }; + qcom,l1_i_cache101 { + qcom,dump-node = <&L1_I_101>; + qcom,dump-id = <0x65>; + }; + qcom,l1_i_cache102 { + qcom,dump-node = 
<&L1_I_102>; + qcom,dump-id = <0x66>; + }; + qcom,l1_i_cache103 { + qcom,dump-node = <&L1_I_103>; + qcom,dump-id = <0x67>; + }; + qcom,l1_d_cache0 { + qcom,dump-node = <&L1_D_0>; + qcom,dump-id = <0x80>; + }; + qcom,l1_d_cache1 { + qcom,dump-node = <&L1_D_1>; + qcom,dump-id = <0x81>; + }; + qcom,l1_d_cache2 { + qcom,dump-node = <&L1_D_2>; + qcom,dump-id = <0x82>; + }; + qcom,l1_d_cache3 { + qcom,dump-node = <&L1_D_3>; + qcom,dump-id = <0x83>; + }; + qcom,l1_d_cache100 { + qcom,dump-node = <&L1_D_100>; + qcom,dump-id = <0x84>; + }; + qcom,l1_d_cache101 { + qcom,dump-node = <&L1_D_101>; + qcom,dump-id = <0x85>; + }; + qcom,l1_d_cache102 { + qcom,dump-node = <&L1_D_102>; + qcom,dump-id = <0x86>; + }; + qcom,l1_d_cache103 { + qcom,dump-node = <&L1_D_103>; + qcom,dump-id = <0x87>; + }; + qcom,l1_tlb_dump0 { + qcom,dump-node = <&L1_TLB_0>; + qcom,dump-id = <0x20>; + }; + qcom,l1_tlb_dump1 { + qcom,dump-node = <&L1_TLB_1>; + qcom,dump-id = <0x21>; + }; + qcom,l1_tlb_dump2 { + qcom,dump-node = <&L1_TLB_2>; + qcom,dump-id = <0x22>; + }; + qcom,l1_tlb_dump3 { + qcom,dump-node = <&L1_TLB_3>; + qcom,dump-id = <0x23>; + }; + qcom,l1_tlb_dump100 { + qcom,dump-node = <&L1_TLB_100>; + qcom,dump-id = <0x24>; + }; + qcom,l1_tlb_dump101 { + qcom,dump-node = <&L1_TLB_101>; + qcom,dump-id = <0x25>; + }; + qcom,l1_tlb_dump102 { + qcom,dump-node = <&L1_TLB_102>; + qcom,dump-id = <0x26>; + }; + qcom,l1_tlb_dump103 { + qcom,dump-node = <&L1_TLB_103>; + qcom,dump-id = <0x27>; + }; + }; +}; +#include "trinket-pinctrl.dtsi" diff --git a/arch/arm64/configs/sdmsteppe-perf_defconfig b/arch/arm64/configs/sdmsteppe-perf_defconfig deleted file mode 120000 index 7b9298c42bcb0e921b81d92a268030a8c7d5a855..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sdmsteppe-perf_defconfig +++ /dev/null @@ -1 +0,0 @@ -vendor/sdmsteppe-perf_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sdmsteppe_defconfig b/arch/arm64/configs/sdmsteppe_defconfig deleted file mode 120000 
index 502031ffe286e948376e7ccfd0df61f2c806dcbb..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sdmsteppe_defconfig +++ /dev/null @@ -1 +0,0 @@ -vendor/sdmsteppe_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig index a5f468e18a32d13cb806f60983ab613bcc13ba4e..ec0bddbaaeb422b44f4746796118300faafdb08a 100644 --- a/arch/arm64/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig @@ -203,6 +203,7 @@ CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_QPNP_MISC=y CONFIG_SCSI=y @@ -224,7 +225,7 @@ CONFIG_DM_VERITY_FEC=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_KS8851=y +CONFIG_AT803X_PHY=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -260,6 +261,7 @@ CONFIG_INPUT_GPIO=y # CONFIG_DEVMEM is not set CONFIG_SERIAL_MSM_HS=y CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_DIAG_CHAR=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y @@ -269,6 +271,7 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y @@ -312,6 +315,7 @@ CONFIG_FB=y CONFIG_FB_MSM=y CONFIG_FB_MSM_MDSS=y CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_SPI_PANEL=y CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_SOUND=y @@ -368,6 +372,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -443,6 +448,7 @@ CONFIG_QCOM_KGSL=y CONFIG_QTI_MPM=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y +CONFIG_MSM_TZ_LOG=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y @@ -490,3 +496,5 @@ CONFIG_CRYPTO_CTR=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y 
CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig index bdf2141623517e023f10d5fdc8776614e7758926..6520f3d01e641346cb2beb7ce085dd9c64316f32 100644 --- a/arch/arm64/configs/vendor/qcs405_defconfig +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -165,6 +165,11 @@ CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y CONFIG_BRIDGE_NF_EBTABLES=y CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_SNAT=y CONFIG_L2TP=y CONFIG_L2TP_DEBUGFS=y CONFIG_L2TP_V3=y @@ -269,6 +274,7 @@ CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_MSM_HS=y CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_DIAG_CHAR=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y @@ -280,6 +286,7 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_QCS405=y CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y @@ -384,6 +391,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -549,3 +557,5 @@ CONFIG_CRYPTO_CTR=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index b902162ba3d4102e0a03ccf7c1c7811c33fb872c..aadc30a38b098cbdc19329d65b19ab7d40b034e4 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -330,6 +330,7 @@ CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_SX150X=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_SM8150=y +CONFIG_PINCTRL_SLPI=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y 
CONFIG_QCOM_DLOAD_MODE=y @@ -475,6 +476,7 @@ CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_EP_PCIE=y CONFIG_EP_PCIE_HW=y CONFIG_USB_BAM=y +CONFIG_MSM_MHI_DEV=y CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y CONFIG_RMNET_IPA3=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index de9e0ec721004c6949d49f3aa001150480941c22..d1495301aad2d9d8aadba0f30d558993f371efa6 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -345,6 +345,7 @@ CONFIG_PTP_1588_CLOCK=y CONFIG_PINCTRL_SX150X=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_SM8150=y +CONFIG_PINCTRL_SLPI=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y CONFIG_QCOM_DLOAD_MODE=y @@ -498,6 +499,7 @@ CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_EP_PCIE=y CONFIG_EP_PCIE_HW=y CONFIG_USB_BAM=y +CONFIG_MSM_MHI_DEV=y CONFIG_IPA3=y CONFIG_IPA_WDI_UNIFIED_API=y CONFIG_RMNET_IPA3=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index 5b996869e990eb9a31bb3791423349c72606b009..93741d0675773b1dfb9862d9fb362d8639db2767 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -1,5 +1,6 @@ CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y @@ -15,6 +16,8 @@ CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -71,6 +74,7 @@ CONFIG_SETEND_EMULATION=y CONFIG_RANDOMIZE_BASE=y # CONFIG_EFI is not set CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_PM_AUTOSLEEP=y @@ -226,6 +230,8 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y CONFIG_SOCKEV_NLMCAST=y +CONFIG_CAN=y 
+CONFIG_QTI_CAN=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_CFG80211=y @@ -246,6 +252,7 @@ CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_MEMORY_STATE_TIME=y CONFIG_QPNP_MISC=y +CONFIG_FPR_FPC=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y @@ -255,8 +262,10 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -307,9 +316,12 @@ CONFIG_INPUT_UINPUT=y CONFIG_SERIAL_MSM_GENI=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_DIAG_CHAR=y +CONFIG_MSM_FASTCVPD=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_QCOM_GENI=y CONFIG_SPI=y CONFIG_SPI_QCOM_GENI=y @@ -320,6 +332,7 @@ CONFIG_PM8150_PMIC_SIMULATOR=y CONFIG_PM8150B_PMIC_SIMULATOR=y CONFIG_PM8150L_PMIC_SIMULATOR=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_SX150X=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_SDMMAGPIE=y CONFIG_PINCTRL_SM6150=y @@ -359,6 +372,7 @@ CONFIG_REGULATOR_RPMH=y CONFIG_REGULATOR_STUB=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y @@ -370,10 +384,14 @@ CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y CONFIG_MSM_NPU=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_SW=y CONFIG_DRM=y CONFIG_DRM_MSM_REGISTER_LOGGING=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y CONFIG_DRM_SDE_RSC=y +CONFIG_DRM_ANALOGIX_ANX7625=y CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y CONFIG_LOGO=y @@ -442,6 +460,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -464,6 +483,7 @@ CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y CONFIG_ION=y 
+CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE=y CONFIG_QCOM_GENI_SE=y CONFIG_QPNP_REVID=y CONFIG_SPS=y @@ -532,6 +552,8 @@ CONFIG_MSM_PIL=y CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y +CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000 +CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y CONFIG_QCOM_DCC_V2=y @@ -550,12 +572,15 @@ CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_PM=y CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y +CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -567,6 +592,7 @@ CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y @@ -592,6 +618,7 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -626,12 +653,14 @@ CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_FORTIFY_SOURCE=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y @@ -639,6 +668,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig 
index 2f2fefd44afc727a25accb7d0268c54283702194..54f1ef8efa590ec7e80a928f3b93fa1812ef62a7 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y @@ -14,6 +15,8 @@ CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -74,6 +77,7 @@ CONFIG_SETEND_EMULATION=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_PM_AUTOSLEEP=y @@ -232,6 +236,8 @@ CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y CONFIG_SOCKEV_NLMCAST=y +CONFIG_CAN=y +CONFIG_QTI_CAN=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_CFG80211=y @@ -253,6 +259,7 @@ CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_MEMORY_STATE_TIME=y CONFIG_QPNP_MISC=y +CONFIG_FPR_FPC=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_SG=y @@ -263,9 +270,11 @@ CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -274,7 +283,6 @@ CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_RMNET=y -CONFIG_PHYLIB=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -287,6 +295,7 @@ CONFIG_PPPOLAC=y CONFIG_PPPOPNS=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y CONFIG_WIL6210=m CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CLD_LL_CORE=y @@ -311,15 +320,19 @@ CONFIG_INPUT_UINPUT=y # CONFIG_SERIO_SERPORT is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set CONFIG_SERIAL_MSM_GENI=y 
CONFIG_SERIAL_MSM_GENI_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y CONFIG_TTY_PRINTK=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_DIAG_CHAR=y +CONFIG_MSM_FASTCVPD=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_QCOM_GENI=y CONFIG_SPI=y CONFIG_SPI_QCOM_GENI=y @@ -330,6 +343,7 @@ CONFIG_PM8150_PMIC_SIMULATOR=y CONFIG_PM8150B_PMIC_SIMULATOR=y CONFIG_PM8150L_PMIC_SIMULATOR=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_SX150X=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_SDMMAGPIE=y CONFIG_PINCTRL_SM6150=y @@ -369,6 +383,7 @@ CONFIG_REGULATOR_RPMH=y CONFIG_REGULATOR_STUB=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y @@ -380,10 +395,14 @@ CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y CONFIG_MSM_NPU=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_SW=y CONFIG_DRM=y CONFIG_DRM_MSM_REGISTER_LOGGING=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y CONFIG_DRM_SDE_RSC=y +CONFIG_DRM_ANALOGIX_ANX7625=y CONFIG_FB_VIRTUAL=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y @@ -455,6 +474,7 @@ CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -481,6 +501,7 @@ CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y CONFIG_ION=y +CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE=y CONFIG_QCOM_GENI_SE=y CONFIG_QPNP_REVID=y CONFIG_SPS=y @@ -550,6 +571,8 @@ CONFIG_MSM_PIL=y CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y +CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000 +CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000 CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y @@ -575,9 +598,11 @@ CONFIG_QCOM_SMCINVOKE=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_PM=y 
CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y +CONFIG_QCOM_CDSP_RM=y CONFIG_QCOM_CX_IPEAK=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y @@ -589,6 +614,7 @@ CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y @@ -616,6 +642,7 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_F2FS_FS=y CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -661,7 +688,6 @@ CONFIG_PANIC_ON_SCHED_BUG=y CONFIG_PANIC_ON_RT_THROTTLING=y CONFIG_SCHEDSTATS=y CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_ATOMIC_SLEEP=y @@ -703,6 +729,7 @@ CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y @@ -710,6 +737,7 @@ CONFIG_HARDENED_USERCOPY_PAGESPAN=y CONFIG_FORTIFY_SOURCE=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y @@ -717,6 +745,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index 3daa53ecab0db3113841395433f2819c6f29846c..2cb249ef4b0a54d68a00e7fa999608ff976cc733 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -83,6 +83,7 @@ CONFIG_RANDOMIZE_BASE=y # CONFIG_EFI is not 
set CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y +CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_PM_AUTOSLEEP=y @@ -582,6 +583,7 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y +CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 9b18221f1bf2c5b147677845df9e75e7a98a6243..11c301e30c9fecc37c5fa56553e27d9cb8055464 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -89,6 +89,7 @@ CONFIG_SETEND_EMULATION=y CONFIG_RANDOMIZE_BASE=y CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y +CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_PM_AUTOSLEEP=y @@ -413,7 +414,6 @@ CONFIG_DRM=y CONFIG_DRM_MSM_REGISTER_LOGGING=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y CONFIG_DRM_SDE_RSC=y -CONFIG_FB_VIRTUAL=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y @@ -504,6 +504,7 @@ CONFIG_RTC_DRV_QPNP=y CONFIG_DMADEVICES=y CONFIG_QCOM_GPI_DMA=y CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_DEBUG_DMA_BUF_REF=y CONFIG_UIO=y CONFIG_UIO_MSM_SHAREDMEM=y CONFIG_STAGING=y @@ -609,6 +610,7 @@ CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CDSP_RM=y +CONFIG_QCOM_AOP_DDR_MESSAGING=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -664,6 +666,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_MODULE_LOAD_INFO=y CONFIG_DEBUG_INFO=y CONFIG_PAGE_OWNER=y CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y diff --git a/arch/arm64/configs/vendor/trinket_defconfig b/arch/arm64/configs/vendor/trinket_defconfig new 
file mode 100644 index 0000000000000000000000000000000000000000..501fe382c3e5c308f92e0f447c2005a4d1eedbb0 --- /dev/null +++ b/arch/arm64/configs/vendor/trinket_defconfig @@ -0,0 +1,676 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_TRINKET=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_SECCOMP=y +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y 
+# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y 
+CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y 
+CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_SOCKEV_NLMCAST=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_WCD_IRQ=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_QPNP_MISC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_RMNET=y +CONFIG_PHYLIB=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y 
+CONFIG_TOUCHSCREEN_HIMAX_CHIPSET=y +CONFIG_TOUCHSCREEN_HIMAX_I2C=y +CONFIG_TOUCHSCREEN_HIMAX_INCELL=y +CONFIG_TOUCHSCREEN_HIMAX_IC_HX83112=y +CONFIG_TOUCHSCREEN_HIMAX_DEBUG=y +CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE=y +CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV=y +CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y +CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS=y +CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SERIAL_MSM_GENI_CONSOLE=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_TTY_PRINTK=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_SIMULATOR=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_TRINKET=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_QG=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB5=y +CONFIG_SMB1390_CHARGE_PUMP=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_ADC_TM=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y 
+CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_DRM=y +CONFIG_DRM_MSM_REGISTER_LOGGING=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_USB_LINK_LAYER_TEST=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_MSM_HSUSB_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQ_HCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_QCOM_GENI_SE=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_MSM_11AD=m +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_GLINK_SPI=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QCOM_SMP2P=y +CONFIG_QPNP_PBS=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y +CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_MSM_GLADIATOR_HANG_DETECT=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y 
+CONFIG_ICNSS_QMI=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QSEE_IPC_IRQ=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_QCOM_SMP2P_SLEEPSTATE=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_PHY_XGENE=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_LLCC_PMU=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SENSORS_SSC=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EFIVAR_FS=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y 
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_TEST_USER_COPY=m +CONFIG_MEMTEST=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_TGU=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y 
+CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_XZ_DEC=y diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 43393208229eb8d64ec476f5b7b098bc73f498f9..18899288c3b9ad0a14b3be63da596b8146abb8fe 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -49,7 +49,7 @@ static inline unsigned long __my_cpu_offset(void) static inline unsigned long __percpu_##op(void *ptr, \ unsigned long val, int size) \ { \ - unsigned long loop, ret; \ + unsigned long loop, ret = 0; \ \ switch (size) { \ case 1: \ @@ -106,7 +106,7 @@ PERCPU_OP(or, orr) static inline unsigned long __percpu_read(void *ptr, int size) { - unsigned long ret; + unsigned long ret = 0; switch (size) { case 1: @@ -151,7 +151,7 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size) static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) { - unsigned long ret, loop; + unsigned long ret = 0, loop; switch (size) { case 1: diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 354d8c361091608cac018ef104419474599b945b..b6aeee12a33c46e0152cf917403e7898d878bf8b 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -677,6 +677,41 @@ static void armv8pmu_disable_event(struct perf_event *event) raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } +#ifdef CONFIG_KRYO_PMU_WORKAROUND +static inline u32 armv8pmu_get_enabled_ints(void) +{ + u32 int_enset; + + int_enset = read_sysreg(pmintenset_el1); + write_sysreg(0xffffffff, pmintenclr_el1); + isb(); + return int_enset; +} + +static inline u32 armv8pmu_update_enabled_ints(u32 value, int idx, int set) +{ + if (set) + value |= BIT(ARMV8_IDX_TO_COUNTER(idx)); + else + value &= ~(BIT(ARMV8_IDX_TO_COUNTER(idx))); + + return value; +} + 
+static inline void armv8pmu_set_enabled_ints(u32 mask) +{ + write_sysreg(mask, pmintenset_el1); + isb(); +} +#else +static inline u32 armv8pmu_get_enabled_ints(void) +{ return 0; } + +static inline u32 armv8pmu_update_enabled_ints(u32 value, int idx, int set) +{ return value; } + +static inline void armv8pmu_set_enabled_ints(u32 mask) { } +#endif static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) { @@ -686,6 +721,12 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; + u32 enabled_ints; + + /* + * Get enabled the PMU interrupts and mask all PMU interrupts. + */ + enabled_ints = armv8pmu_get_enabled_ints(); /* * Get and reset the IRQ flags @@ -724,8 +765,16 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) + if (perf_event_overflow(event, &data, regs)) { cpu_pmu->disable(event); + + /* + * Update the list of interrupts + * that should be reenabled. 
+ */ + enabled_ints = armv8pmu_update_enabled_ints( + enabled_ints, idx, 0); + } } /* @@ -737,6 +786,11 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) */ irq_work_run(); + /* + * Re-enable the PMU interrupts + */ + armv8pmu_set_enabled_ints(enabled_ints); + return IRQ_HANDLED; } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 9e493cab40a963e3de7ed913a23fc95810a0babe..573105191e44c89a6e826a225a6524be563ad400 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -125,6 +125,7 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); + pr_alert(" FSC = %lu\n", (esr & ESR_ELx_FSC)); if (esr_is_data_abort(esr)) data_abort_decode(esr); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 05edcadb723b6dd9b423241a4037f5fee0706577..f749e1d50cf15e6b7f49506265b77653f0d7a60a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -695,27 +695,11 @@ void __init paging_init(void) */ void hotplug_paging(phys_addr_t start, phys_addr_t size) { - - struct page *pg; - phys_addr_t pgd_phys = pgd_pgtable_alloc(); - pgd_t *pgd = pgd_set_fixmap(pgd_phys); int flags; - memcpy(pgd, swapper_pg_dir, PAGE_SIZE); flags = debug_pagealloc_enabled() ? 
NO_BLOCK_MAPPINGS : 0; - - __create_pgd_mapping(pgd, start, __phys_to_virt(start), size, + __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), size, PAGE_KERNEL, pgd_pgtable_alloc, flags); - - cpu_replace_ttbr1(__va(pgd_phys)); - memcpy(swapper_pg_dir, pgd, PAGE_SIZE); - cpu_replace_ttbr1(swapper_pg_dir); - - pgd_clear_fixmap(); - - pg = phys_to_page(pgd_phys); - pgtable_page_dtor(pg); - __free_pages(pg, 0); } #ifdef CONFIG_MEMORY_HOTREMOVE @@ -1396,12 +1380,12 @@ int pmd_clear_huge(pmd_t *pmd) return 1; } -int pud_free_pmd_page(pud_t *pud) +int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return pud_none(*pud); } -int pmd_free_pte_page(pmd_t *pmd) +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return pmd_none(*pmd); } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 1fd3eb5b66c6c6586c028b9002dcb185f6d54e56..89e684fd795f1501a4dfc279b5f01d4919724079 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -201,7 +201,7 @@ config PREFETCH config MLONGCALLS bool "Enable the -mlong-calls compiler option for big kernels" - def_bool y if (!MODULES) + default y depends on PA8X00 help If you configure the kernel to include many drivers built-in instead diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..dbaaca84f27f342ef1c1b8c743e66fa4d6a6f8eb --- /dev/null +++ b/arch/parisc/include/asm/barrier.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ + +/* The synchronize caches instruction executes as a nop on systems in + which all memory references are performed in order. 
*/ +#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") + +#if defined(CONFIG_SMP) +#define mb() do { synchronize_caches(); } while (0) +#define rmb() mb() +#define wmb() mb() +#define dma_rmb() mb() +#define dma_wmb() mb() +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#define __smp_mb() mb() +#define __smp_rmb() mb() +#define __smp_wmb() mb() + +#include + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index e95207c0565eb12308e12d48c45bd5309ae6e2ae..1b4732e201374adc2721ab7141e5ed9b5bf9bd69 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -481,6 +481,8 @@ /* Release pa_tlb_lock lock without reloading lock address. */ .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP + or,COND(=) %r0,\spc,%r0 + sync or,COND(=) %r0,\spc,%r0 stw \spc,0(\tmp) #endif diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 67b0f7532e835f4db1214c6ccecf62183eb84e50..3e163df49cf30210c441324ca7d0424ddf701e49 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -354,6 +354,7 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_unlock la,flags,tmp #ifdef CONFIG_SMP ldi 1,\tmp + sync stw \tmp,0(\la) mtsm \flags #endif diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index e775f80ae28c5ab8d7fac7456235fa4f415c7215..4886a6db42e98f2b1fc4c58916ee68dfc98df008 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -633,6 +633,7 @@ cas_action: sub,<> %r28, %r25, %r0 2: stw,ma %r24, 0(%r26) /* Free lock */ + sync stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ @@ -647,6 +648,7 @@ cas_action: 3: /* Error occurred on load or store */ /* Free lock */ + sync stw %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) @@ -848,6 +850,7 @@ 
cas2_action: cas2_end: /* Free lock */ + sync stw,ma %r20, 0(%sr2,%r20) /* Enable interrupts */ ssm PSW_SM_I, %r0 @@ -858,6 +861,7 @@ cas2_end: 22: /* Error occurred on load or store */ /* Free lock */ + sync stw %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 1(%r0),%r28 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7483cd514c32555dffb43aef80c7c699218e5cd1..1c63a4b5320da6139f5f334f967677c1165a7690 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -176,6 +176,7 @@ config X86 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER + select HOTPLUG_SMT if SMP select IRQ_FORCED_THREADING select PCI_LOCKLESS_CONFIG select PERF_EVENTS diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S index 16c4ccb1f154883017062c90d7ca0fc22dc67e29..d2364c55bbdeb6513730e646183cf7a925eac1e2 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S @@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2) vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest(state , idx, 4) , %xmm0 + vmovd _args_digest+4*32(state, idx, 4), %xmm1 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5f01671c68f267ccc3b3aedd518134453162733a..a1ed92aae12a6652210e392347cc54bc0b34f29a 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -10,6 +10,7 @@ #include #include #include +#include #define ARCH_APICTIMER_STOPS_ON_C3 1 @@ -613,12 +614,20 @@ extern int default_check_phys_apicid_present(int phys_apicid); #endif #endif /* CONFIG_X86_LOCAL_APIC */ + +#ifdef 
CONFIG_SMP +bool apic_id_is_primary_thread(unsigned int id); +#else +static inline bool apic_id_is_primary_thread(unsigned int id) { return false; } +#endif + extern void irq_enter(void); extern void irq_exit(void); static inline void entering_irq(void) { irq_enter(); + kvm_set_cpu_l1tf_flush_l1d(); } static inline void entering_ack_irq(void) @@ -631,6 +640,7 @@ static inline void ipi_entering_ack_irq(void) { irq_enter(); ack_APIC_irq(); + kvm_set_cpu_l1tf_flush_l1d(); } static inline void exiting_irq(void) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 403e97d5e24322775dc01953ef32f8f4e3dd9276..8418462298e719bf756a9c0745afb24f8f5b21a1 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -219,6 +219,7 @@ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ +#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ @@ -338,6 +339,7 @@ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ @@ -370,5 +372,6 @@ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ #define 
X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ +#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h index 0ab2ab27ad1fb59993fb651f69f49df50e857ff9..b825cb201251654f7a085b0b612a0b574979fa2b 100644 --- a/arch/x86/include/asm/dmi.h +++ b/arch/x86/include/asm/dmi.h @@ -4,8 +4,8 @@ #include #include +#include -#include #include static __always_inline __init void *dmi_alloc(unsigned len) diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 51cc979dd3642ff4ad743ba4efcd38c97a6877b9..486c843273c46fe6c102fdce5997d8d9ed211a80 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -3,10 +3,12 @@ #define _ASM_X86_HARDIRQ_H #include -#include typedef struct { - unsigned int __softirq_pending; + u16 __softirq_pending; +#if IS_ENABLED(CONFIG_KVM_INTEL) + u8 kvm_cpu_l1tf_flush_l1d; +#endif unsigned int __nmi_count; /* arch dependent */ #ifdef CONFIG_X86_LOCAL_APIC unsigned int apic_timer_irqs; /* arch dependent */ @@ -62,4 +64,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu); extern u64 arch_irq_stat(void); #define arch_irq_stat arch_irq_stat + +#if IS_ENABLED(CONFIG_KVM_INTEL) +static inline void kvm_set_cpu_l1tf_flush_l1d(void) +{ + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); +} + +static inline void kvm_clear_cpu_l1tf_flush_l1d(void) +{ + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0); +} + +static inline bool kvm_get_cpu_l1tf_flush_l1d(void) +{ + return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); +} +#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ +static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } +#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */ + #endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index 
5cdcdbd4d892029f7cbd90ab30dfe0832e5780f4..89789e8c80f66a97db08ffad891ed26e5025219e 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -3,6 +3,7 @@ #define _ASM_X86_I8259_H #include +#include extern unsigned int cached_irq_mask; diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c4fc17220df959f2d5feb493af6374e7dacce613..c14f2a74b2be7495f1ee00c92322a58cc43d10a6 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -13,6 +13,8 @@ * Interrupt control: */ +/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */ +extern inline unsigned long native_save_fl(void); extern inline unsigned long native_save_fl(void) { unsigned long flags; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 174b9c41efce00d67d034616afcc974a7e51fe2f..4015b88383ce98ad5cbc828f098bb0d3b8b16598 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -506,6 +507,7 @@ struct kvm_vcpu_arch { u64 smbase; bool tpr_access_reporting; u64 ia32_xss; + u64 microcode_version; /* * Paging state of the vcpu @@ -693,6 +695,9 @@ struct kvm_vcpu_arch { /* be preempted when it's in kernel-mode(cpl=0) */ bool preempted_in_kernel; + + /* Flush the L1 Data cache for L1TF mitigation on VMENTER */ + bool l1tf_flush_l1d; }; struct kvm_lpage_info { @@ -862,6 +867,7 @@ struct kvm_vcpu_stat { u64 signal_exits; u64 irq_window_exits; u64 nmi_window_exits; + u64 l1d_flush; u64 halt_exits; u64 halt_successful_poll; u64 halt_attempted_poll; @@ -1061,6 +1067,8 @@ struct kvm_x86_ops { void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); void (*setup_mce)(struct kvm_vcpu *vcpu); + + int (*get_msr_feature)(struct kvm_msr_entry *entry); }; struct kvm_arch_async_pf { @@ -1366,6 +1374,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool 
init_event); void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); +u64 kvm_get_arch_capabilities(void); void kvm_define_shared_msr(unsigned index, u32 msr); int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 504b21692d3277d37ee1d22bf1367faec9d4db97..ef7eec669a1bcc5d1ff80a50920b4e74ba137e3b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -70,12 +70,19 @@ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ +#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ #define ARCH_CAP_SSB_NO (1 << 4) /* * Not susceptible to Speculative Store Bypass * attack, so no Speculative Store Bypass * control required. */ +#define MSR_IA32_FLUSH_CMD 0x0000010b +#define L1D_FLUSH (1 << 0) /* + * Writeback and invalidate the + * L1 data cache. + */ + #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index aa30c3241ea7da812da88c4f4151c9139db2845f..0d5c739eebd715f387d37a81a52b837119daf9ad 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -29,8 +29,13 @@ #define N_EXCEPTION_STACKS 1 #ifdef CONFIG_X86_PAE -/* 44=32+12, the limit we can fit into an unsigned long pfn */ -#define __PHYSICAL_MASK_SHIFT 44 +/* + * This is beyond the 44 bit limit imposed by the 32bit long pfns, + * but we need the full mask to make sure inverted PROT_NONE + * entries have all the host bits set in a guest. + * The real limit is still 44 bits. 
+ */ +#define __PHYSICAL_MASK_SHIFT 52 #define __VIRTUAL_MASK_SHIFT 32 #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 685ffe8a0eaf84d5a3b0374cf7ef31cb3e51bb7c..60d0f90153178b3fb104360536854d676f7429ee 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -95,4 +95,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) +/* No inverted PFNs on 2 level page tables */ + +static inline u64 protnone_mask(u64 val) +{ + return 0; +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) +{ + return val; +} + +static inline bool __pte_needs_invert(u64 val) +{ + return false; +} + #endif /* _ASM_X86_PGTABLE_2LEVEL_H */ diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index bc4af5453802559af4c1f14a2e3a3dbaf96e57dc..9dc19b4a2a870398e57239c9af545924713b0b49 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -206,12 +206,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp) #endif /* Encode and de-code a swap entry */ +#define SWP_TYPE_BITS 5 + +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) + +/* We always extract/encode the offset by shifting it all the way up, and then down again */ +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS) + #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) -#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) + +/* + * Normally, __swp_entry() converts from arch-independent swp_entry_t to + * 
arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result + * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the + * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to + * __swp_entry_to_pte() through the following helper macro based on 64bit + * __swp_entry(). + */ +#define __swp_pteval_entry(type, offset) ((pteval_t) { \ + (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ + | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) }) + +#define __swp_entry_to_pte(x) ((pte_t){ .pte = \ + __swp_pteval_entry(__swp_type(x), __swp_offset(x)) }) +/* + * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent + * swp_entry_t, but also has to convert it from 64bit to the 32bit + * intermediate representation, using the following macros based on 64bit + * __swp_type() and __swp_offset(). + */ +#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS))) +#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)) + +#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \ + __pteval_swp_offset(pte))) #define gup_get_pte gup_get_pte /* @@ -260,4 +291,6 @@ static inline pte_t gup_get_pte(pte_t *ptep) return pte; } +#include + #endif /* _ASM_X86_PGTABLE_3LEVEL_H */ diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h new file mode 100644 index 0000000000000000000000000000000000000000..a0c1525f1b6f417448bebb5898a48b71f5b86c0d --- /dev/null +++ b/arch/x86/include/asm/pgtable-invert.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PGTABLE_INVERT_H +#define _ASM_PGTABLE_INVERT_H 1 + +#ifndef __ASSEMBLY__ + +/* + * A clear pte value is special, and doesn't get inverted. + * + * Note that even users that only pass a pgprot_t (rather + * than a full pte) won't trigger the special zero case, + * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED + * set.
So the all zero case really is limited to just the + * cleared page table entry case. + */ +static inline bool __pte_needs_invert(u64 val) +{ + return val && !(val & _PAGE_PRESENT); +} + +/* Get a mask to xor with the page table entry to get the correct pfn. */ +static inline u64 protnone_mask(u64 val) +{ + return __pte_needs_invert(val) ? ~0ull : 0; +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) +{ + /* + * When a PTE transitions from NONE to !NONE or vice-versa + * invert the PFN part to stop speculation. + * pte_pfn undoes this when needed. + */ + if (__pte_needs_invert(oldval) != __pte_needs_invert(val)) + val = (val & ~mask) | (~val & mask); + return val; +} + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5c790e93657d7615e2f28809a28d898248372a84..6a4b1a54ff479cf2ff018470351de45890625582 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -185,19 +185,29 @@ static inline int pte_special(pte_t pte) return pte_flags(pte) & _PAGE_SPECIAL; } +/* Entries that were set to PROT_NONE are inverted */ + +static inline u64 protnone_mask(u64 val); + static inline unsigned long pte_pfn(pte_t pte) { - return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; + phys_addr_t pfn = pte_val(pte); + pfn ^= protnone_mask(pfn); + return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT; } static inline unsigned long pmd_pfn(pmd_t pmd) { - return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; + phys_addr_t pfn = pmd_val(pmd); + pfn ^= protnone_mask(pfn); + return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; } static inline unsigned long pud_pfn(pud_t pud) { - return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; + phys_addr_t pfn = pud_val(pud); + pfn ^= protnone_mask(pfn); + return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT; } static inline unsigned long p4d_pfn(p4d_t p4d) @@ -400,11 +410,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_RW); 
} -static inline pmd_t pmd_mknotpresent(pmd_t pmd) -{ - return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); -} - static inline pud_t pud_set_flags(pud_t pud, pudval_t set) { pudval_t v = native_pud_val(pud); @@ -459,11 +464,6 @@ static inline pud_t pud_mkwrite(pud_t pud) return pud_set_flags(pud, _PAGE_RW); } -static inline pud_t pud_mknotpresent(pud_t pud) -{ - return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE); -} - #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY static inline int pte_soft_dirty(pte_t pte) { @@ -528,25 +528,45 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot) static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { - return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= PTE_PFN_MASK; + return __pte(pfn | massage_pgprot(pgprot)); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { - return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= PHYSICAL_PMD_PAGE_MASK; + return __pmd(pfn | massage_pgprot(pgprot)); } static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) { - return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= PHYSICAL_PUD_PAGE_MASK; + return __pud(pfn | massage_pgprot(pgprot)); } +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + return pfn_pmd(pmd_pfn(pmd), + __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); +} + +static inline pud_t pud_mknotpresent(pud_t pud) +{ + return pfn_pud(pud_pfn(pud), + __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); + static inline pte_t pte_modify(pte_t 
pte, pgprot_t newprot) { - pteval_t val = pte_val(pte); + pteval_t val = pte_val(pte), oldval = val; /* * Chop off the NX bit (if present), and add the NX portion of @@ -554,17 +574,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) */ val &= _PAGE_CHG_MASK; val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; - + val = flip_protnone_guard(oldval, val, PTE_PFN_MASK); return __pte(val); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - pmdval_t val = pmd_val(pmd); + pmdval_t val = pmd_val(pmd), oldval = val; val &= _HPAGE_CHG_MASK; val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; - + val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK); return __pmd(val); } @@ -1274,6 +1294,14 @@ static inline bool pud_access_permitted(pud_t pud, bool write) return __pte_access_permitted(pud_val(pud), write); } +#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1 +extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot); + +static inline bool arch_has_pfn_modify_check(void) +{ + return boot_cpu_has_bug(X86_BUG_L1TF); +} + #include #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 1149d2112b2e17347e8f85c4cae355f4522cc40a..4ecb728319384759eeb9c96eb94bbec7faf79aba 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -276,7 +276,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } * * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names - * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry + * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry * * G (8) is aliased and used as a PROT_NONE indicator for * !present ptes. We need to start storing swap entries above @@ -289,20 +289,34 @@ static inline int pgd_large(pgd_t pgd) { return 0; } * * Bit 7 in swp entry should be 0 because pmd_present checks not only P, * but also L and G. 
+ * + * The offset is inverted by a binary not operation to make the high + * physical bits set. */ -#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) -#define SWP_TYPE_BITS 5 -/* Place the offset above the type: */ -#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) +#define SWP_TYPE_BITS 5 + +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) + +/* We always extract/encode the offset by shifting it all the way up, and then down again */ +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS) #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) -#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \ - & ((1U << SWP_TYPE_BITS) - 1)) -#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT) -#define __swp_entry(type, offset) ((swp_entry_t) { \ - ((type) << (SWP_TYPE_FIRST_BIT)) \ - | ((offset) << SWP_OFFSET_FIRST_BIT) }) +/* Extract the high bits for type */ +#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS)) + +/* Shift up (to get rid of type), then down to get value */ +#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) + +/* + * Shift the offset up "too far" by TYPE bits, then down again + * The offset is inverted by a binary not operation to make the high + * physical bits set. 
+ */ +#define __swp_entry(type, offset) ((swp_entry_t) { \ + (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ + | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) }) + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) @@ -346,5 +360,7 @@ static inline bool gup_fast_permitted(unsigned long start, int nr_pages, return true; } +#include + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_64_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3222c7746cb1f857b686dbbdf5d06c475cf6aaad..0e856c0628b350dd6d1aca368e213a927156da07 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -180,6 +180,11 @@ extern const struct seq_operations cpuinfo_op; extern void cpu_detect(struct cpuinfo_x86 *c); +static inline unsigned long l1tf_pfn_limit(void) +{ + return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; +} + extern void early_cpu_init(void); extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); @@ -969,4 +974,16 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); void microcode_check(void); + +enum l1tf_mitigations { + L1TF_MITIGATION_OFF, + L1TF_MITIGATION_FLUSH_NOWARN, + L1TF_MITIGATION_FLUSH, + L1TF_MITIGATION_FLUSH_NOSMT, + L1TF_MITIGATION_FULL, + L1TF_MITIGATION_FULL_FORCE +}; + +extern enum l1tf_mitigations l1tf_mitigation; + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 461f53d27708ae8b80622753122c1a4927537ee2..fe2ee61880a86a7da9d2e33a1604c58d7051b86a 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -170,7 +170,6 @@ static inline int wbinvd_on_all_cpus(void) wbinvd(); return 0; } -#define smp_num_siblings 1 #endif /* CONFIG_SMP */ 
extern unsigned disabled_cpus; diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index c1d2a9892352742a1512306138da023d77c6050b..453cf38a1c33d5b2452dff29be06ce52b5b30ffb 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -123,13 +123,17 @@ static inline int topology_max_smt_threads(void) } int topology_update_package_map(unsigned int apicid, unsigned int cpu); -extern int topology_phys_to_logical_pkg(unsigned int pkg); +int topology_phys_to_logical_pkg(unsigned int pkg); +bool topology_is_primary_thread(unsigned int cpu); +bool topology_smt_supported(void); #else #define topology_max_packages() (1) static inline int topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; } static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } static inline int topology_max_smt_threads(void) { return 1; } +static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } +static inline bool topology_smt_supported(void) { return false; } #endif static inline void arch_fix_phys_package_id(int num, u32 slot) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 7c300299e12eaf862e48715e4bd6e23cd42574da..08c14aec26acaecb7ed96f04cd87da8a0d123116 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -571,4 +571,15 @@ enum vm_instruction_error_number { VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, }; +enum vmx_l1d_flush_state { + VMENTER_L1D_FLUSH_AUTO, + VMENTER_L1D_FLUSH_NEVER, + VMENTER_L1D_FLUSH_COND, + VMENTER_L1D_FLUSH_ALWAYS, + VMENTER_L1D_FLUSH_EPT_DISABLED, + VMENTER_L1D_FLUSH_NOT_REQUIRED, +}; + +extern enum vmx_l1d_flush_state l1tf_vmx_mitigation; + #endif diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index f48a51335538813cdd96ba82a46e87e745d9ef3f..2e64178f284da29666a9f4108c3f5614377575b1 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -34,6 +34,7 @@ 
#include #include #include +#include #include #include @@ -56,6 +57,7 @@ #include #include #include +#include unsigned int num_processors; @@ -2092,6 +2094,23 @@ static int cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = -1, }; +#ifdef CONFIG_SMP +/** + * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread + * @id: APIC ID to check + */ +bool apic_id_is_primary_thread(unsigned int apicid) +{ + u32 mask; + + if (smp_num_siblings == 1) + return true; + /* Isolate the SMT bit(s) in the APICID and check for 0 */ + mask = (1U << (fls(smp_num_siblings) - 1)) - 1; + return !(apicid & mask); +} +#endif + /* * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids * and cpuid_to_apicid[] synchronized. diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c index 56ccf9346b08ac4bf3e54e7994074b6c04fb02e8..741de281ed5de06f917c14e6943fc383a0003bda 100644 --- a/arch/x86/kernel/apic/htirq.c +++ b/arch/x86/kernel/apic/htirq.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 3b89b27945fffc8f7c9d8dd28f9ed787158ad833..96a8a68f9c793377c305edf63113cd69c352cf45 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 9b18be76442236eab971cab86cbf366f9e1e134f..f10e7f93b0e2c04e7b5f22adbc7ebb574f710281 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 2ce1c708b8ee399db1f856ef8ce8d50cdec66bfb..b958082c74a77f0903c830949f6b3aae667adea9 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. 
*/ #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 90574f731c05062bea029310560f853489575ee5..dda741bd57892037ebcd19bb106109399847d4be 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -298,7 +298,6 @@ static int nearby_node(int apicid) } #endif -#ifdef CONFIG_SMP /* * Fix up cpu_core_id for pre-F17h systems to be in the * [0 .. cores_per_node - 1] range. Not really needed but @@ -315,6 +314,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c) c->cpu_core_id %= cus_per_node; } + +static void amd_get_topology_early(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_TOPOEXT)) + smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; +} + /* * Fixup core topology information for * (1) AMD multi-node processors @@ -333,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); node_id = ecx & 0xff; - smp_num_siblings = ((ebx >> 8) & 0xff) + 1; if (c->x86 == 0x15) c->cu_id = ebx & 0xff; @@ -376,7 +381,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) legacy_fixup_core_id(c); } } -#endif /* * On a AMD dual core setup the lower bits of the APIC id distinguish the cores. 
@@ -384,7 +388,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) */ static void amd_detect_cmp(struct cpuinfo_x86 *c) { -#ifdef CONFIG_SMP unsigned bits; int cpu = smp_processor_id(); @@ -396,16 +399,11 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; amd_get_topology(c); -#endif } u16 amd_get_nb_id(int cpu) { - u16 id = 0; -#ifdef CONFIG_SMP - id = per_cpu(cpu_llc_id, cpu); -#endif - return id; + return per_cpu(cpu_llc_id, cpu); } EXPORT_SYMBOL_GPL(amd_get_nb_id); @@ -579,6 +577,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) static void early_init_amd(struct cpuinfo_x86 *c) { + u64 value; u32 dummy; early_init_amd_mc(c); @@ -668,6 +667,22 @@ static void early_init_amd(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_SME); } } + + /* Re-enable TopologyExtensions if switched off by BIOS */ + if (c->x86 == 0x15 && + (c->x86_model >= 0x10 && c->x86_model <= 0x6f) && + !cpu_has(c, X86_FEATURE_TOPOEXT)) { + + if (msr_set_bit(0xc0011005, 54) > 0) { + rdmsrl(0xc0011005, value); + if (value & BIT_64(54)) { + set_cpu_cap(c, X86_FEATURE_TOPOEXT); + pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); + } + } + } + + amd_get_topology_early(c); } static void init_amd_k8(struct cpuinfo_x86 *c) @@ -759,19 +774,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; - /* re-enable TopologyExtensions if switched off by BIOS */ - if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) && - !cpu_has(c, X86_FEATURE_TOPOEXT)) { - - if (msr_set_bit(0xc0011005, 54) > 0) { - rdmsrl(0xc0011005, value); - if (value & BIT_64(54)) { - set_cpu_cap(c, X86_FEATURE_TOPOEXT); - pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); - } - } - } - /* * The way access filter has a performance penalty on some workloads. * Disable it on the affected CPUs. 
@@ -835,15 +837,8 @@ static void init_amd(struct cpuinfo_x86 *c) cpu_detect_cache_sizes(c); - /* Multi core CPU? */ - if (c->extended_cpuid_level >= 0x80000008) { - amd_detect_cmp(c); - srat_detect_node(c); - } - -#ifdef CONFIG_X86_32 - detect_ht(c); -#endif + amd_detect_cmp(c); + srat_detect_node(c); init_amd_cacheinfo(c); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 7416fc206b4a0e3f17be821e932d9fd840c03079..d07addb99b7180d1aefba2c939cd421acabc1592 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -22,14 +22,17 @@ #include #include #include +#include #include #include #include #include #include +#include static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); +static void __init l1tf_select_mitigation(void); /* * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any @@ -55,6 +58,12 @@ void __init check_bugs(void) { identify_boot_cpu(); + /* + * identify_boot_cpu() initialized SMT support information, let the + * core code know. + */ + cpu_smt_check_topology_early(); + if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); print_cpu_info(&boot_cpu_data); @@ -81,6 +90,8 @@ void __init check_bugs(void) */ ssb_select_mitigation(); + l1tf_select_mitigation(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. 
@@ -311,23 +322,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return cmd; } -/* Check for Skylake-like CPUs (for RSB handling) */ -static bool __init is_skylake_era(void) -{ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) { - switch (boot_cpu_data.x86_model) { - case INTEL_FAM6_SKYLAKE_MOBILE: - case INTEL_FAM6_SKYLAKE_DESKTOP: - case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_KABYLAKE_MOBILE: - case INTEL_FAM6_KABYLAKE_DESKTOP: - return true; - } - } - return false; -} - static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -388,22 +382,15 @@ static void __init spectre_v2_select_mitigation(void) pr_info("%s\n", spectre_v2_strings[mode]); /* - * If neither SMEP nor PTI are available, there is a risk of - * hitting userspace addresses in the RSB after a context switch - * from a shallow call stack to a deeper one. To prevent this fill - * the entire RSB, even when using IBRS. + * If spectre v2 protection has been enabled, unconditionally fill + * RSB during a context switch; this protects against two independent + * issues: * - * Skylake era CPUs have a separate issue with *underflow* of the - * RSB, when they will predict 'ret' targets from the generic BTB. - * The proper mitigation for this is IBRS. If IBRS is not supported - * or deactivated in favour of retpolines the RSB fill on context - * switch is required. 
+ * - RSB underflow (and switch to BTB) on Skylake+ + * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs */ - if ((!boot_cpu_has(X86_FEATURE_PTI) && - !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { - setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); - pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); - } + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); + pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); /* Initialize Indirect Branch Prediction Barrier if supported */ if (boot_cpu_has(X86_FEATURE_IBPB)) { @@ -654,8 +641,120 @@ void x86_spec_ctrl_setup_ap(void) x86_amd_ssb_disable(); } +#undef pr_fmt +#define pr_fmt(fmt) "L1TF: " fmt + +/* Default mitigation for L1TF-affected CPUs */ +enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; +#if IS_ENABLED(CONFIG_KVM_INTEL) +EXPORT_SYMBOL_GPL(l1tf_mitigation); +#endif +enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; +EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + +static void __init l1tf_select_mitigation(void) +{ + u64 half_pa; + + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: + case L1TF_MITIGATION_FLUSH: + break; + case L1TF_MITIGATION_FLUSH_NOSMT: + case L1TF_MITIGATION_FULL: + cpu_smt_disable(false); + break; + case L1TF_MITIGATION_FULL_FORCE: + cpu_smt_disable(true); + break; + } + +#if CONFIG_PGTABLE_LEVELS == 2 + pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); + return; +#endif + + /* + * This is extremely unlikely to happen because almost all + * systems have far more MAX_PA/2 than RAM can be fit into + * DIMM slots. + */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); + return; + } + + setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); +} + +static int __init l1tf_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + l1tf_mitigation = L1TF_MITIGATION_OFF; + else if (!strcmp(str, "flush,nowarn")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; + else if (!strcmp(str, "flush")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH; + else if (!strcmp(str, "flush,nosmt")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; + else if (!strcmp(str, "full")) + l1tf_mitigation = L1TF_MITIGATION_FULL; + else if (!strcmp(str, "full,force")) + l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; + + return 0; +} +early_param("l1tf", l1tf_cmdline); + +#undef pr_fmt + #ifdef CONFIG_SYSFS +#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" + +#if IS_ENABLED(CONFIG_KVM_INTEL) +static const char *l1tf_vmx_states[] = { + [VMENTER_L1D_FLUSH_AUTO] = "auto", + [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", + [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", + [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", + [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" +}; + +static ssize_t l1tf_show_state(char *buf) +{ + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || + (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && + cpu_smt_control == CPU_SMT_ENABLED)) + return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation]); + + return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation], + cpu_smt_control == CPU_SMT_ENABLED ? 
"vulnerable" : "disabled"); +} +#else +static ssize_t l1tf_show_state(char *buf) +{ + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); +} +#endif + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -681,6 +780,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_SPEC_STORE_BYPASS: return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + case X86_BUG_L1TF: + if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) + return l1tf_show_state(buf); + break; default: break; } @@ -707,4 +810,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute * { return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); } + +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 48e98964ecadb71754c32f300d2e99544e733400..dd02ee4fa8cd9f4340d994e941156a279d048b46 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -66,6 +66,13 @@ cpumask_var_t cpu_callin_mask; /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; +/* Number of siblings per CPU package */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* Last level cache ID of each logical CPU */ +DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; + /* correctly size the local cpu masks */ void __init setup_cpu_local_masks(void) { @@ -614,33 +621,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c) tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); } -void detect_ht(struct cpuinfo_x86 *c) +int detect_ht_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP u32 eax, ebx, ecx, edx; - int index_msb, core_bits; - static bool printed; if (!cpu_has(c, X86_FEATURE_HT)) - return; + return -1; if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) - goto out; + return -1; 
if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) - return; + return -1; cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; - - if (smp_num_siblings == 1) { + if (smp_num_siblings == 1) pr_info_once("CPU0: Hyper-Threading is disabled\n"); - goto out; - } +#endif + return 0; +} - if (smp_num_siblings <= 1) - goto out; +void detect_ht(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + int index_msb, core_bits; + + if (detect_ht_early(c) < 0) + return; index_msb = get_count_order(smp_num_siblings); c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); @@ -653,15 +663,6 @@ void detect_ht(struct cpuinfo_x86 *c) c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); - -out: - if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { - pr_info("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - pr_info("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - printed = 1; - } #endif } @@ -933,6 +934,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { {} }; +static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { + /* in addition to cpu_no_speculation */ + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + {} +}; + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = 0; @@ -958,6 +974,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) return; setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + + if (x86_match_cpu(cpu_no_l1tf)) + return; + + 
setup_force_cpu_bug(X86_BUG_L1TF); } /* diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 37672d299e357430f2d16941905e352e9e89f648..cca588407dca80915a1eec1197a84e0d1f13e8f4 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -47,6 +47,8 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); +extern int detect_extended_topology_early(struct cpuinfo_x86 *c); +extern int detect_ht_early(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0b2330e191694c638b40cc2b5b7f797d31e18d6e..278be092b3009e99d4aa6bb7dc81fa4f1622e398 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -301,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c) } check_mpx_erratum(c); + + /* + * Get the number of SMT siblings early from the extended topology + * leaf, if available. Otherwise try the legacy SMT detection. + */ + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 4fc0e08a30b9981faf809132ef74a129bd1572a1..387a8f44fba1ecca7436e2cf2ae6b0cdbb43977f 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -509,12 +509,20 @@ static struct platform_device *microcode_pdev; static int check_online_cpus(void) { - if (num_online_cpus() == num_present_cpus()) - return 0; + unsigned int cpu; - pr_err("Not all CPUs online, aborting microcode update.\n"); + /* + * Make sure all CPUs are online. It's fine for SMT to be disabled if + * all the primary threads are still online. 
+ */ + for_each_present_cpu(cpu) { + if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { + pr_err("Not all CPUs online, aborting microcode update.\n"); + return -EINVAL; + } + } - return -EINVAL; + return 0; } static atomic_t late_cpus_in; diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index b099024d339c386929dbda450a328bc2b3887377..19c6e800e8162ae90ec852b3437c9d5e7be5e8d4 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -27,16 +27,13 @@ * exists, use it for populating initial_apicid and cpu topology * detection. */ -void detect_extended_topology(struct cpuinfo_x86 *c) +int detect_extended_topology_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP - unsigned int eax, ebx, ecx, edx, sub_index; - unsigned int ht_mask_width, core_plus_mask_width; - unsigned int core_select_mask, core_level_siblings; - static bool printed; + unsigned int eax, ebx, ecx, edx; if (c->cpuid_level < 0xb) - return; + return -1; cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); @@ -44,7 +41,7 @@ void detect_extended_topology(struct cpuinfo_x86 *c) * check if the cpuid leaf 0xb is actually implemented. */ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) - return; + return -1; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); @@ -52,10 +49,30 @@ void detect_extended_topology(struct cpuinfo_x86 *c) * initial apic id, which also represents 32-bit extended x2apic id. */ c->initial_apicid = edx; + smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); +#endif + return 0; +} + +/* + * Check for extended topology enumeration cpuid leaf 0xb and if it + * exists, use it for populating initial_apicid and cpu topology + * detection. 
+ */ +void detect_extended_topology(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + unsigned int eax, ebx, ecx, edx, sub_index; + unsigned int ht_mask_width, core_plus_mask_width; + unsigned int core_select_mask, core_level_siblings; + + if (detect_extended_topology_early(c) < 0) + return; /* * Populate HT related information from sub-leaf level 0. */ + cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); @@ -86,15 +103,5 @@ void detect_extended_topology(struct cpuinfo_x86 *c) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); - - if (!printed) { - pr_info("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - if (c->x86_max_cores > 1) - pr_info("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - printed = 1; - } - return; #endif } diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index f92a6593de1ec651d244d43d16acbd8d49959e4b..2ea85b32421a02d5f3f8e6643757610576841e3c 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e39accb3f8e53ab7eb5372e0726f7..7acb87cb2da83309f86df91dcc239044037ee1e3 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -27,6 +27,7 @@ #include #include +#include #include #include diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 8ce4212e2b8d0f139e543e05e7e284ee25ee856d..afa1a204bc6d69029a2520725b174ba201dd7cd7 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 8f5cb2c7060cfc29b46ec24669ec418dfdf0c0df..02abc134367ff0c0d93d2ac2758006a5f0925637 100644 --- 
a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 0c5256653d6c5256eef39c5dc6768d49e40889a0..38c3d5790970cfaa3bd0e52018dff7095acf270f 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -8,6 +8,7 @@ #include #include #include +#include struct idt_data { unsigned int vector; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index aa9d51eea9d0e806e51122421e77d67dd31bb027..3c2326b598208ceb1254468e3dd9a01e23ec0e67 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index c1bdbd3d3232cb83d07bfa4ce604ae0b620c796a..95600a99ae93652dbbd4143c9e538e808911bb24 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index d86e344f5b3debfed504b72a7c0f83f36fe16387..0469cd078db15c0c26700fc7a50f9c3f8144ff2a 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 1e4094eba15e95e6f9b3c6f19de6d748bb894dd1..40f83d0d7b8a3e8da20a46a9f899c39b064d46c4 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index f1030c522e06c868146f7e13f13a6b9c54cbaf67..65452d555f0514178528bfc13d96c7bbd8d19cab 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "common.h" @@ -394,8 +395,6 @@ int __copy_instruction(u8 *dest, u8 
*src, struct insn *insn) - (u8 *) dest; if ((s64) (s32) newdisp != newdisp) { pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); - pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", - src, dest, insn->displacement.value); return 0; } disp = (u8 *) dest + insn_offset_displacement(insn); @@ -621,8 +620,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, * Raise a BUG or we'll continue in an endless reentering loop * and eventually a stack overflow. */ - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_err("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); default: diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e1df9ef5d78c95516f877bfdbeeceaf3ba09cde0..f3559b84cd7536f44f16d3b316e149e4a17f261f 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf, struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); - if (tgt_clobbers & ~site_clobbers) - return len; /* target would clobber too much for this site */ - if (len < 5) + if (len < 5) { +#ifdef CONFIG_RETPOLINE + WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); +#endif return len; /* call too long for patch site */ + } b->opcode = 0xe8; /* call */ b->delta = delta; @@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); - if (len < 5) + if (len < 5) { +#ifdef CONFIG_RETPOLINE + WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); +#endif return len; /* call too long for patch site */ + } b->opcode = 0xe9; /* jmp */ b->delta = delta; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index efbcf5283520ba28741388cb29b9e2cb5ede6376..dcb00acb6583c83432552f6032fb22f3d33fe980 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -852,6 
+852,12 @@ void __init setup_arch(char **cmdline_p) memblock_reserve(__pa_symbol(_text), (unsigned long)__bss_stop - (unsigned long)_text); + /* + * Make sure page 0 is always reserved because on systems with + * L1TF its contents can be leaked to user processes. + */ + memblock_reserve(0, PAGE_SIZE); + early_reserve_initrd(); /* diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 5c574dff4c1a00990c6e6d8e5d993cc2777c6c6f..04adc8d60aed82178caf3a099d66b497a6c11bcf 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); + kvm_set_cpu_l1tf_flush_l1d(); if (trace_resched_ipi_enabled()) { /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 344d3c160f8d779773a7c25ff4f971d5c2273622..5ebb0dbcf4f7884bf12978c5817a0524d9f1313e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -78,13 +78,7 @@ #include #include #include - -/* Number of siblings per CPU package */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - -/* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; +#include /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); @@ -311,6 +305,23 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu) return 0; } +/** + * topology_is_primary_thread - Check whether CPU is the primary SMT thread + * @cpu: CPU to check + */ +bool topology_is_primary_thread(unsigned int cpu) +{ + return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu)); +} + +/** + * topology_smt_supported - Check whether SMT is supported by the CPUs + */ +bool topology_smt_supported(void) +{ + return smp_num_siblings > 1; +} + /** * topology_phys_to_logical_pkg - Map a physical package id to a logical * diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c 
index 879af864d99afd6c8645f0d74fe71bf6a2bade07..49a5c394f3ed45af7afd9e1841b6833ec120ad90 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2ef2f1fe875bf7aa908876014a134d6e91e13e9e..00e2ae033a0f57b3980767c2beb5388043b35f2b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3825,6 +3825,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, { int r = 1; + vcpu->arch.l1tf_flush_l1d = true; switch (vcpu->arch.apf.host_apf_reason) { default: trace_kvm_page_fault(fault_address, error_code); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cfa155078ebb70b006788c2692d8a3ee9f6e4422..282bbcbf3b6a999b449cf1705a27e53a16e4b480 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -175,6 +175,8 @@ struct vcpu_svm { uint64_t sysenter_eip; uint64_t tsc_aux; + u64 msr_decfg; + u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; @@ -1616,6 +1618,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 dummy; u32 eax = 1; + vcpu->arch.microcode_version = 0x01000065; svm->spec_ctrl = 0; svm->virt_spec_ctrl = 0; @@ -3555,6 +3558,22 @@ static int cr8_write_interception(struct vcpu_svm *svm) return 0; } +static int svm_get_msr_feature(struct kvm_msr_entry *msr) +{ + msr->data = 0; + + switch (msr->index) { + case MSR_F10H_DECFG: + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) + msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; + break; + default: + return 1; + } + + return 0; +} + static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3637,9 +3656,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = svm->virt_spec_ctrl; break; - case MSR_IA32_UCODE_REV: - msr_info->data = 0x01000065; - break; case MSR_F15H_IC_CFG: { int family, model; @@ -3657,6 +3673,9 @@ static int 
svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x1E; } break; + case MSR_F10H_DECFG: + msr_info->data = svm->msr_decfg; + break; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3845,6 +3864,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; + case MSR_F10H_DECFG: { + struct kvm_msr_entry msr_entry; + + msr_entry.index = msr->index; + if (svm_get_msr_feature(&msr_entry)) + return 1; + + /* Check the supported bits */ + if (data & ~msr_entry.data) + return 1; + + /* Don't allow the guest to change a bit, #GP */ + if (!msr->host_initiated && (data ^ msr_entry.data)) + return 1; + + svm->msr_decfg = data; + break; + } case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); @@ -5588,6 +5625,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .vcpu_unblocking = svm_vcpu_unblocking, .update_bp_intercept = update_bp_intercept, + .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8d000fde14140e43f7b535a19c2346033bdbb203..f015ca3997d9216c28c1db88b1e7e7cf0e6de64f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -191,6 +191,150 @@ module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); +static DEFINE_MUTEX(vmx_l1d_flush_mutex); + +/* Storage for pre module init parameter parsing */ +static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; + +static const struct { + const char *option; + enum vmx_l1d_flush_state cmd; +} vmentry_l1d_param[] = { + {"auto", VMENTER_L1D_FLUSH_AUTO}, + {"never", VMENTER_L1D_FLUSH_NEVER}, + {"cond", VMENTER_L1D_FLUSH_COND}, + 
{"always", VMENTER_L1D_FLUSH_ALWAYS}, +}; + +#define L1D_CACHE_ORDER 4 +static void *vmx_l1d_flush_pages; + +static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) +{ + struct page *page; + unsigned int i; + + if (!enable_ept) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; + return 0; + } + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { + u64 msr; + + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; + return 0; + } + } + + /* If set to auto use the default l1tf mitigation method */ + if (l1tf == VMENTER_L1D_FLUSH_AUTO) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + l1tf = VMENTER_L1D_FLUSH_NEVER; + break; + case L1TF_MITIGATION_FLUSH_NOWARN: + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + l1tf = VMENTER_L1D_FLUSH_COND; + break; + case L1TF_MITIGATION_FULL: + case L1TF_MITIGATION_FULL_FORCE: + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + break; + } + } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + } + + if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && + !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { + page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); + if (!page) + return -ENOMEM; + vmx_l1d_flush_pages = page_address(page); + + /* + * Initialize each page with a different pattern in + * order to protect against KSM in the nested + * virtualization case. 
+ */ + for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { + memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, + PAGE_SIZE); + } + } + + l1tf_vmx_mitigation = l1tf; + + if (l1tf != VMENTER_L1D_FLUSH_NEVER) + static_branch_enable(&vmx_l1d_should_flush); + else + static_branch_disable(&vmx_l1d_should_flush); + + if (l1tf == VMENTER_L1D_FLUSH_COND) + static_branch_enable(&vmx_l1d_flush_cond); + else + static_branch_disable(&vmx_l1d_flush_cond); + return 0; +} + +static int vmentry_l1d_flush_parse(const char *s) +{ + unsigned int i; + + if (s) { + for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { + if (sysfs_streq(s, vmentry_l1d_param[i].option)) + return vmentry_l1d_param[i].cmd; + } + } + return -EINVAL; +} + +static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) +{ + int l1tf, ret; + + if (!boot_cpu_has(X86_BUG_L1TF)) + return 0; + + l1tf = vmentry_l1d_flush_parse(s); + if (l1tf < 0) + return l1tf; + + /* + * Has vmx_init() run already? If not then this is the pre init + * parameter parsing. In that case just store the value and let + * vmx_init() do the proper setup after enable_ept has been + * established. 
+ */ + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { + vmentry_l1d_flush_param = l1tf; + return 0; + } + + mutex_lock(&vmx_l1d_flush_mutex); + ret = vmx_setup_l1d_flush(l1tf); + mutex_unlock(&vmx_l1d_flush_mutex); + return ret; +} + +static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) +{ + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); +} + +static const struct kernel_param_ops vmentry_l1d_flush_ops = { + .set = vmentry_l1d_flush_set, + .get = vmentry_l1d_flush_get, +}; +module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); + #define NR_AUTOLOAD_MSRS 8 struct vmcs { @@ -567,6 +711,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc) (unsigned long *)&pi_desc->control); } +struct vmx_msrs { + unsigned int nr; + struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; +}; + struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; @@ -600,9 +749,8 @@ struct vcpu_vmx { struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { - unsigned nr; - struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; - struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; + struct vmx_msrs guest; + struct vmx_msrs host; } msr_autoload; struct { int loaded; @@ -1967,9 +2115,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, vm_exit_controls_clearbit(vmx, exit); } +static int find_msr(struct vmx_msrs *m, unsigned int msr) +{ + unsigned int i; + + for (i = 0; i < m->nr; ++i) { + if (m->val[i].index == msr) + return i; + } + return -ENOENT; +} + static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { - unsigned i; + int i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { @@ -1990,18 +2149,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) } break; } + i = find_msr(&m->guest, msr); + if (i < 0) + goto skip_guest; + --m->guest.nr; + m->guest.val[i] = m->guest.val[m->guest.nr]; + 
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); - for (i = 0; i < m->nr; ++i) - if (m->guest[i].index == msr) - break; - - if (i == m->nr) +skip_guest: + i = find_msr(&m->host, msr); + if (i < 0) return; - --m->nr; - m->guest[i] = m->guest[m->nr]; - m->host[i] = m->host[m->nr]; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); + + --m->host.nr; + m->host.val[i] = m->host.val[m->host.nr]; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, @@ -2016,9 +2178,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, - u64 guest_val, u64 host_val) + u64 guest_val, u64 host_val, bool entry_only) { - unsigned i; + int i, j = 0; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { @@ -2053,24 +2215,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } - for (i = 0; i < m->nr; ++i) - if (m->guest[i].index == msr) - break; + i = find_msr(&m->guest, msr); + if (!entry_only) + j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS) { + if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. 
" "Can't add msr %x\n", msr); return; - } else if (i == m->nr) { - ++m->nr; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } + if (i < 0) { + i = m->guest.nr++; + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); + } + m->guest.val[i].index = msr; + m->guest.val[i].value = guest_val; - m->guest[i].index = msr; - m->guest[i].value = guest_val; - m->host[i].index = msr; - m->host[i].value = host_val; + if (entry_only) + return; + + if (j < 0) { + j = m->host.nr++; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); + } + m->host.val[j].index = msr; + m->host.val[j].value = host_val; } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) @@ -2114,7 +2283,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, - guest_efer, host_efer); + guest_efer, host_efer, false); return false; } else { guest_efer &= ~ignore_bits; @@ -3266,6 +3435,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, return !(val & ~valid_bits); } +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) +{ + return 1; +} + /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. 
@@ -3523,7 +3697,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.ia32_xss = data; if (vcpu->arch.ia32_xss != host_xss) add_atomic_switch_msr(vmx, MSR_IA32_XSS, - vcpu->arch.ia32_xss, host_xss); + vcpu->arch.ia32_xss, host_xss, false); else clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; @@ -5714,9 +5888,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); @@ -5736,8 +5910,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities); + vmx->arch_capabilities = kvm_get_arch_capabilities(); vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); @@ -5770,6 +5943,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx->rmode.vm86_active = 0; vmx->spec_ctrl = 0; + vcpu->arch.microcode_version = 0x100000000ULL; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); @@ -8987,6 +9161,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) } } +/* + * Software based L1D cache flush which is used when microcode providing + * the cache control MSR is not loaded. + * + * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to + * flush it is required to read in 64 KiB because the replacement algorithm + * is not exactly LRU. 
This could be sized at runtime via topology + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. + */ +#define L1D_CACHE_ORDER 4 +static void *vmx_l1d_flush_pages; + +static void vmx_l1d_flush(struct kvm_vcpu *vcpu) +{ + int size = PAGE_SIZE << L1D_CACHE_ORDER; + + /* + * This code is only executed when the the flush mode is 'cond' or + * 'always' + */ + if (static_branch_likely(&vmx_l1d_flush_cond)) { + bool flush_l1d; + + /* + * Clear the per-vcpu flush bit, it gets set again + * either from vcpu_run() or from one of the unsafe + * VMEXIT handlers. + */ + flush_l1d = vcpu->arch.l1tf_flush_l1d; + vcpu->arch.l1tf_flush_l1d = false; + + /* + * Clear the per-cpu flush bit, it gets set again from + * the interrupt handlers. + */ + flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); + kvm_clear_cpu_l1tf_flush_l1d(); + + if (!flush_l1d) + return; + } + + vcpu->stat.l1d_flush++; + + if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { + wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + return; + } + + asm volatile( + /* First ensure the pages are in the TLB */ + "xorl %%eax, %%eax\n" + ".Lpopulate_tlb:\n\t" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $4096, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lpopulate_tlb\n\t" + "xorl %%eax, %%eax\n\t" + "cpuid\n\t" + /* Now fill the cache */ + "xorl %%eax, %%eax\n" + ".Lfill_cache:\n" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $64, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lfill_cache\n\t" + "lfence\n" + :: [flush_pages] "r" (vmx_l1d_flush_pages), + [size] "r" (size) + : "eax", "ebx", "ecx", "edx"); +} + static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -9390,7 +9637,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, - msrs[i].host); + msrs[i].host, false); } static 
void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) @@ -9483,6 +9730,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->__launched = vmx->loaded_vmcs->launched; + if (static_branch_unlikely(&vmx_l1d_should_flush)) + vmx_l1d_flush(vcpu); + asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" @@ -9835,6 +10085,37 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(err); } +#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" +#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" + +static int vmx_vm_init(struct kvm *kvm) +{ + if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: + /* 'I explicitly don't care' is set */ + break; + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + case L1TF_MITIGATION_FULL: + /* + * Warn upon starting the first VM in a potentially + * insecure environment. + */ + if (cpu_smt_control == CPU_SMT_ENABLED) + pr_warn_once(L1TF_MSG_SMT); + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) + pr_warn_once(L1TF_MSG_L1D); + break; + case L1TF_MITIGATION_FULL_FORCE: + /* Flush is enforced */ + break; + } + } + return 0; +} + static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; @@ -10774,10 +11055,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * Set the MSR load/store lists to match L0's settings. 
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before @@ -11202,6 +11483,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (ret) return ret; + /* Hide L1D cache contents from the nested guest. */ + vmx->vcpu.arch.l1tf_flush_l1d = true; + /* * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken * by event injection, halt vcpu. @@ -11712,8 +11996,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmx_segment_cache_clear(vmx); /* Update any VMCS fields that might have changed while L2 ran */ - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (vmx->hv_deadline_tsc == -1) vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, @@ -12225,6 +12509,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .cpu_has_accelerated_tpr = report_flexpriority, .has_emulated_msr = vmx_has_emulated_msr, + .vm_init = vmx_vm_init, + .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, @@ -12234,6 +12520,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, + 
.get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, @@ -12341,22 +12628,18 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .setup_mce = vmx_setup_mce, }; -static int __init vmx_init(void) +static void vmx_cleanup_l1d_flush(void) { - int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), - __alignof__(struct vcpu_vmx), THIS_MODULE); - if (r) - return r; - -#ifdef CONFIG_KEXEC_CORE - rcu_assign_pointer(crash_vmclear_loaded_vmcss, - crash_vmclear_local_loaded_vmcss); -#endif - - return 0; + if (vmx_l1d_flush_pages) { + free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); + vmx_l1d_flush_pages = NULL; + } + /* Restore state so sysfs ignores VMX */ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; } -static void __exit vmx_exit(void) + +static void vmx_exit(void) { #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); @@ -12364,7 +12647,40 @@ static void __exit vmx_exit(void) #endif kvm_exit(); + + vmx_cleanup_l1d_flush(); } +module_exit(vmx_exit) +static int __init vmx_init(void) +{ + int r; + + r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), + __alignof__(struct vcpu_vmx), THIS_MODULE); + if (r) + return r; + + /* + * Must be called after kvm_init() so enable_ept is properly set + * up. Hand the parameter mitigation value in which was stored in + * the pre module init parser. If no parameter was given, it will + * contain 'auto' which will be turned into the default 'cond' + * mitigation mode. 
+ */ + if (boot_cpu_has(X86_BUG_L1TF)) { + r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); + if (r) { + vmx_exit(); + return r; + } + } + +#ifdef CONFIG_KEXEC_CORE + rcu_assign_pointer(crash_vmclear_loaded_vmcss, + crash_vmclear_local_loaded_vmcss); +#endif + + return 0; +} module_init(vmx_init) -module_exit(vmx_exit) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2f3fe25639b345fe6de659369702b8aa9aa9b135..5c2c09f6c1c31310ad0e8ab9c124bb29540fb377 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -181,6 +181,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "req_event", VCPU_STAT(req_event) }, + { "l1d_flush", VCPU_STAT(l1d_flush) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, @@ -1041,6 +1042,71 @@ static u32 emulated_msrs[] = { static unsigned num_emulated_msrs; +/* + * List of msr numbers which are used to expose MSR-based features that + * can be used by a hypervisor to validate requested CPU features. + */ +static u32 msr_based_features[] = { + MSR_F10H_DECFG, + MSR_IA32_UCODE_REV, + MSR_IA32_ARCH_CAPABILITIES, +}; + +static unsigned int num_msr_based_features; + +u64 kvm_get_arch_capabilities(void) +{ + u64 data; + + rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data); + + /* + * If we're doing cache flushes (either "always" or "cond") + * we will do one whenever the guest does a vmlaunch/vmresume. + * If an outer hypervisor is doing the cache flush for us + * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that + * capability to the guest too, and if EPT is disabled we're not + * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will + * require a nested hypervisor to do a flush of its own. 
+ */ + if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) + data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; + + return data; +} +EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); + +static int kvm_get_msr_feature(struct kvm_msr_entry *msr) +{ + switch (msr->index) { + case MSR_IA32_ARCH_CAPABILITIES: + msr->data = kvm_get_arch_capabilities(); + break; + case MSR_IA32_UCODE_REV: + rdmsrl_safe(msr->index, &msr->data); + break; + default: + if (kvm_x86_ops->get_msr_feature(msr)) + return 1; + } + return 0; +} + +static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) +{ + struct kvm_msr_entry msr; + int r; + + msr.index = index; + r = kvm_get_msr_feature(&msr); + if (r) + return r; + + *data = msr.data; + + return 0; +} + bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) @@ -2156,7 +2222,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_AMD64_NB_CFG: - case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: @@ -2164,6 +2229,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DC_CFG: break; + case MSR_IA32_UCODE_REV: + if (msr_info->host_initiated) + vcpu->arch.microcode_version = data; + break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: @@ -2450,7 +2519,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0; break; case MSR_IA32_UCODE_REV: - msr_info->data = 0x100000000ULL; + msr_info->data = vcpu->arch.microcode_version; break; case MSR_MTRRcap: case 0x200 ... 
0x2ff: @@ -2600,13 +2669,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { - int i, idx; + int i; - idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; - srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } @@ -2705,6 +2772,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SET_BOOT_CPU_ID: case KVM_CAP_SPLIT_IRQCHIP: case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_GET_MSR_FEATURES: r = 1; break; case KVM_CAP_ADJUST_CLOCK: @@ -2819,6 +2887,31 @@ long kvm_arch_dev_ioctl(struct file *filp, goto out; r = 0; break; + case KVM_GET_MSR_FEATURE_INDEX_LIST: { + struct kvm_msr_list __user *user_msr_list = argp; + struct kvm_msr_list msr_list; + unsigned int n; + + r = -EFAULT; + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) + goto out; + n = msr_list.nmsrs; + msr_list.nmsrs = num_msr_based_features; + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) + goto out; + r = -E2BIG; + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; + if (copy_to_user(user_msr_list->indices, &msr_based_features, + num_msr_based_features * sizeof(u32))) + goto out; + r = 0; + break; + } + case KVM_GET_MSRS: + r = msr_io(NULL, argp, do_get_msr_feature, 1); + break; } default: r = -EINVAL; @@ -3553,12 +3646,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } - case KVM_GET_MSRS: + case KVM_GET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_get_msr, 1); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; - case KVM_SET_MSRS: + } + case KVM_SET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_set_msr, 0); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; + } case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; @@ -4333,6 +4432,19 @@ static void kvm_init_msr_list(void) j++; } num_emulated_msrs = 
j; + + for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { + struct kvm_msr_entry msr; + + msr.index = msr_based_features[i]; + if (kvm_get_msr_feature(&msr)) + continue; + + if (j < i) + msr_based_features[j] = msr_based_features[i]; + j++; + } + num_msr_based_features = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, @@ -4573,6 +4685,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { + /* kvm_write_guest_virt_system can pull in tons of pages. */ + vcpu->arch.l1tf_flush_l1d = true; + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); } @@ -5701,6 +5816,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; + vcpu->arch.l1tf_flush_l1d = true; + /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. 
@@ -7146,6 +7263,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + vcpu->arch.l1tf_flush_l1d = true; for (;;) { if (kvm_vcpu_running(vcpu)) { @@ -8153,6 +8271,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { + vcpu->arch.l1tf_flush_l1d = true; kvm_x86_ops->sched_in(vcpu, cpu); } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 0133d26f16bea276206888ce4418634d27e38bad..c2faff548f59a575024a62e4291bf567a8939dc3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -24,6 +24,7 @@ #include /* emulate_vsyscall */ #include /* struct vm86 */ #include /* vma_pkey() */ +#include #define CREATE_TRACE_POINTS #include diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 071cbbbb60d96974a9f790fd6b69e3ff9ffe1162..37f60dfd7e4efbeb5f01ccceaaacef6b98537bf6 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -4,6 +4,8 @@ #include #include #include /* for max_low_pfn */ +#include +#include #include #include @@ -880,3 +882,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); __pte2cachemode_tbl[entry] = cache; } + +#ifdef CONFIG_SWAP +unsigned long max_swapfile_size(void) +{ + unsigned long pages; + + pages = generic_max_swapfile_size(); + + if (boot_cpu_has_bug(X86_BUG_L1TF)) { + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ + unsigned long l1tf_limit = l1tf_pfn_limit() + 1; + /* + * We encode swap offsets also with 3 bits below those for pfn + * which makes the usable limit higher. 
+ */ +#if CONFIG_PGTABLE_LEVELS > 2 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; +#endif + pages = min_t(unsigned long, l1tf_limit, pages); + } + return pages; +} +#endif diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 7c868670963617865d2e9fb494a08b04a82b49cb..79eb55ce69a91f716c15524d56816604bcdcc100 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr) static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) { + pmd_t new_pmd; pmdval_t v = pmd_val(*pmd); if (clear) { - *old = v & _PAGE_PRESENT; - v &= ~_PAGE_PRESENT; - } else /* presume this has been called with clear==true previously */ - v |= *old; - set_pmd(pmd, __pmd(v)); + *old = v; + new_pmd = pmd_mknotpresent(*pmd); + } else { + /* Presume this has been called with clear==true previously */ + new_pmd = __pmd(*old); + } + set_pmd(pmd, new_pmd); } static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) { pteval_t v = pte_val(*pte); if (clear) { - *old = v & _PAGE_PRESENT; - v &= ~_PAGE_PRESENT; - } else /* presume this has been called with clear==true previously */ - v |= *old; - set_pte_atomic(pte, __pte(v)); + *old = v; + /* Nothing should care about address */ + pte_clear(&init_mm, 0, pte); + } else { + /* Presume this has been called with clear==true previously */ + set_pte_atomic(pte, __pte(*old)); + } } static int clear_page_presence(struct kmmio_fault_page *f, bool clear) diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index a9967982684649155cfcdc921d5247c8fbfe70d6..5f4805d69aab2af70a9dcbe39877096cb9807fca 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -174,3 +174,24 @@ const char *arch_vma_name(struct vm_area_struct *vma) return "[mpx]"; return NULL; } + +/* + * Only allow root to set high MMIO mappings to PROT_NONE. + * This prevents an unpriv. 
user to set them to PROT_NONE and invert + * them, then pointing to valid memory for L1TF speculation. + * + * Note: for locked down kernels may want to disable the root override. + */ +bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return true; + if (!__pte_needs_invert(pgprot_val(prot))) + return true; + /* If it's real memory always allow */ + if (pfn_valid(pfn)) + return true; + if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + return false; + return true; +} diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4085897fef648700f669fea03907828296076fb8..464f53da3a6f53f198d860b0bef0ce9c41db362f 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1006,8 +1006,8 @@ static long populate_pmd(struct cpa_data *cpa, pmd = pmd_offset(pud, start); - set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | - massage_pgprot(pmd_pgprot))); + set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, + canon_pgprot(pmd_pgprot)))); start += PMD_SIZE; cpa->pfn += PMD_SIZE >> PAGE_SHIFT; @@ -1079,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, * Map everything starting from the Gb boundary, possibly with 1G pages */ while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { - set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | - massage_pgprot(pud_pgprot))); + set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, + canon_pgprot(pud_pgprot)))); start += PUD_SIZE; cpa->pfn += PUD_SIZE >> PAGE_SHIFT; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index c03c85e4fb6a6cb5fe35479a9e1cb63d65b7984c..2bdb8e8a9d7c85509d3ed2a70630e7a7c353c0eb 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -712,28 +712,50 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } +#ifdef CONFIG_X86_64 /** * pud_free_pmd_page - Clear pud entry and free pmd page. * @pud: Pointer to a PUD. + * @addr: Virtual address associated with pud. 
* - * Context: The pud range has been unmaped and TLB purged. + * Context: The pud range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. + * + * NOTE: Callers must allow a single page allocation. */ -int pud_free_pmd_page(pud_t *pud) +int pud_free_pmd_page(pud_t *pud, unsigned long addr) { - pmd_t *pmd; + pmd_t *pmd, *pmd_sv; + pte_t *pte; int i; if (pud_none(*pud)) return 1; pmd = (pmd_t *)pud_page_vaddr(*pud); + pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); + if (!pmd_sv) + return 0; - for (i = 0; i < PTRS_PER_PMD; i++) - if (!pmd_free_pte_page(&pmd[i])) - return 0; + for (i = 0; i < PTRS_PER_PMD; i++) { + pmd_sv[i] = pmd[i]; + if (!pmd_none(pmd[i])) + pmd_clear(&pmd[i]); + } pud_clear(pud); + + /* INVLPG to clear all paging-structure caches */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); + + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(pmd_sv[i])) { + pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]); + free_page((unsigned long)pte); + } + } + + free_page((unsigned long)pmd_sv); free_page((unsigned long)pmd); return 1; @@ -742,11 +764,12 @@ int pud_free_pmd_page(pud_t *pud) /** * pmd_free_pte_page - Clear pmd entry and free pte page. * @pmd: Pointer to a PMD. + * @addr: Virtual address associated with pmd. * - * Context: The pmd range has been unmaped and TLB purged. + * Context: The pmd range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. */ -int pmd_free_pte_page(pmd_t *pmd) +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { pte_t *pte; @@ -755,8 +778,30 @@ int pmd_free_pte_page(pmd_t *pmd) pte = (pte_t *)pmd_page_vaddr(*pmd); pmd_clear(pmd); + + /* INVLPG to clear all paging-structure caches */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); + free_page((unsigned long)pte); return 1; } + +#else /* !CONFIG_X86_64 */ + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return pud_none(*pud); +} + +/* + * Disable free page handling on x86-PAE. 
This assures that ioremap() + * does not update sync'd pmd entries. See vmalloc_sync_one(). + */ +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return pmd_none(*pmd); +} + +#endif /* CONFIG_X86_64 */ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index ce38f165489b5a13d92091c8671879f30ce44e20..d6f11accd37a0590a7eb3ff22a8aa4ecedb7670b 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -45,6 +45,7 @@ #include #include #include +#include #undef pr_fmt #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 4f5fa65a10110344aa3d763fdf5b73a5d6652c0b..2acd6be133755872bad09e920fc0db1cb4911d2d 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -18,6 +18,7 @@ #include #include #include +#include #define TANGIER_EXT_TIMER0_MSI 12 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 0b530c53de1f8bc4edf2d4deac6b60620de4c491..34f9a9ce62360cb20d4f1354410a2df8c9a988a6 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) struct msg_desc msgdesc; ack_APIC_irq(); + kvm_set_cpu_l1tf_flush_l1d(); time_start = get_cycles(); bcp = &per_cpu(bau_control, smp_processor_id()); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a..df208af3cd749415c67aa270adb5f225a3d42b37 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -3,6 +3,7 @@ #endif #include #include +#include #include #include diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index db6d90e451de908d6be6165e6e2e47c5379619e0..e3b18ad49889afc5ae35d2e2796aecd108a93819 100644 --- a/arch/x86/xen/smp_pv.c +++ 
b/arch/x86/xen/smp_pv.c @@ -430,6 +430,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */ * data back is to call: */ tick_nohz_idle_enter(); + tick_nohz_idle_stop_tick_protected(); cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 4a4b7d3c909a846ac66144cedca8fa891e09080d..3b44bd28fc4521e40c787c5bd7957443fcb506c5 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1203,6 +1203,24 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) return dur; } +/* + * Return the farthest future time instant according to jiffies + * macros. + */ +static unsigned long bfq_greatest_from_now(void) +{ + return jiffies + MAX_JIFFY_OFFSET; +} + +/* + * Return the farthest past time instant according to jiffies + * macros. + */ +static unsigned long bfq_smallest_from_now(void) +{ + return jiffies - MAX_JIFFY_OFFSET; +} + static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, struct bfq_queue *bfqq, unsigned int old_wr_coeff, @@ -1217,7 +1235,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, bfqq->wr_coeff = bfqd->bfq_wr_coeff; bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); } else { - bfqq->wr_start_at_switch_to_srt = jiffies; + /* + * No interactive weight raising in progress + * here: assign minus infinity to + * wr_start_at_switch_to_srt, to make sure + * that, at the end of the soft-real-time + * weight raising periods that is starting + * now, no interactive weight-raising period + * may be wrongly considered as still in + * progress (and thus actually started by + * mistake). 
+ */ + bfqq->wr_start_at_switch_to_srt = + bfq_smallest_from_now(); bfqq->wr_coeff = bfqd->bfq_wr_coeff * BFQ_SOFTRT_WEIGHT_FACTOR; bfqq->wr_cur_max_time = @@ -2896,24 +2926,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); } -/* - * Return the farthest future time instant according to jiffies - * macros. - */ -static unsigned long bfq_greatest_from_now(void) -{ - return jiffies + MAX_JIFFY_OFFSET; -} - -/* - * Return the farthest past time instant according to jiffies - * macros. - */ -static unsigned long bfq_smallest_from_now(void) -{ - return jiffies - MAX_JIFFY_OFFSET; -} - /** * bfq_bfqq_expire - expire a queue. * @bfqd: device owning the queue. diff --git a/block/bio.c b/block/bio.c index 3fa014e609997ff693fcd3d98a565a4d05ff57be..3a63aba1509dcbc36f3d4e16e10eebe40dc04ed6 100644 --- a/block/bio.c +++ b/block/bio.c @@ -727,6 +727,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, } } + bio_clone_crypt_key(bio, bio_src); bio_clone_blkcg_association(bio, bio_src); return bio; diff --git a/block/blk-core.c b/block/blk-core.c index a8ca7d4ed992b482e872598317287e34a0652abf..d21f176246b246acba0075087e7bebb7501c51e8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1650,6 +1650,7 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; + WARN_ON(req->__dun || bio->bi_iter.bi_dun); req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -1799,6 +1800,7 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio) else req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); req->write_hint = bio->bi_write_hint; + req->__dun = bio->bi_iter.bi_dun; blk_rq_bio_prep(req->q, req, bio); } EXPORT_SYMBOL_GPL(blk_init_request_from_bio); @@ -1999,7 +2001,8 @@ static inline int blk_partition_remap(struct bio *bio) 
trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), bio->bi_iter.bi_sector - p->start_sect); } else { - printk("%s: fail for partition %d\n", __func__, bio->bi_partno); + printk_ratelimited("%s: fail for partition %d\n", + __func__, bio->bi_partno); ret = -EIO; } rcu_read_unlock(); @@ -2051,7 +2054,7 @@ generic_make_request_checks(struct bio *bio) q = bio->bi_disk->queue; if (unlikely(!q)) { - printk(KERN_ERR + printk_ratelimited(KERN_ERR "generic_make_request: Trying to access " "nonexistent block-device %s (%Lu)\n", bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); @@ -2630,7 +2633,8 @@ struct request *blk_peek_request(struct request_queue *q) __blk_end_request_all(rq, ret == BLKPREP_INVALID ? BLK_STS_TARGET : BLK_STS_IOERR); } else { - printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); + printk_ratelimited(KERN_ERR "%s: bad return=%d\n", + __func__, ret); break; } } @@ -2784,8 +2788,11 @@ bool blk_update_request(struct request *req, blk_status_t error, req->__data_len -= total_bytes; /* update sector only for requests with clear definition of sector */ - if (!blk_rq_is_passthrough(req)) + if (!blk_rq_is_passthrough(req)) { req->__sector += total_bytes >> 9; + if (req->__dun) + req->__dun += total_bytes >> 12; + } /* mixed attributes always follow the first bio */ if (req->rq_flags & RQF_MIXED_MERGE) { @@ -3148,6 +3155,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); + dst->__dun = blk_rq_dun(src); dst->__data_len = blk_rq_bytes(src); if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { dst->rq_flags |= RQF_SPECIAL_PAYLOAD; diff --git a/block/blk-merge.c b/block/blk-merge.c index e8c45fd4a9726e2750462eedd3b2036ed9fc75a0..390f3464322d05de31583e27e8a2a9821afdfa46 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -845,6 +845,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) { + if 
(blk_rq_dun(rq) || bio_dun(bio)) + return ELEVATOR_NO_MERGE; if (req_op(rq) == REQ_OP_DISCARD && queue_max_discard_segments(rq->q) > 1) return ELEVATOR_DISCARD_MERGE; diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index d880a489715976fef528cfc7544956d1ce761b52..4ee7c041bb82b7ea6e1133e03f5675479e0eb8a1 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, - unsigned int bsize) +static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk, + unsigned int n) { - unsigned int n = bsize; - for (;;) { unsigned int len_this_page = scatterwalk_pagelen(&walk->out); @@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, n -= len_this_page; scatterwalk_start(&walk->out, sg_next(walk->out.sg)); } - - return bsize; } -static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, - unsigned int n) +static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk, + unsigned int n) { scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - - return n; } static int ablkcipher_walk_next(struct ablkcipher_request *req, @@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req, struct ablkcipher_walk *walk, int err) { struct crypto_tfm *tfm = req->base.tfm; - unsigned int nbytes = 0; + unsigned int n; /* bytes processed */ + bool more; - if (likely(err >= 0)) { - unsigned int n = walk->nbytes - err; + if (unlikely(err < 0)) + goto finish; - if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) - n = ablkcipher_done_fast(walk, n); - else if (WARN_ON(err)) { - err = -EINVAL; - goto err; - } else - n = ablkcipher_done_slow(walk, n); + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - nbytes = walk->total - n; - err = 0; + if (likely(!(walk->flags & 
ABLKCIPHER_WALK_SLOW))) { + ablkcipher_done_fast(walk, n); + } else { + if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ + err = -EINVAL; + goto finish; + } + ablkcipher_done_slow(walk, n); } - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); - -err: - walk->total = nbytes; - walk->nbytes = nbytes; + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); - if (nbytes) { + if (more) { crypto_yield(req->base.flags); return ablkcipher_walk_next(req, walk); } - + err = 0; +finish: + walk->nbytes = 0; if (walk->iv != req->info) memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); kfree(walk->iv_buffer); - return err; } EXPORT_SYMBOL_GPL(ablkcipher_walk_done); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 6c43a0a17a5514b46f9300e21ded357defb39d50..d84c6920ada9fe0b814df033b066edcb1e06e22a 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, - unsigned int bsize) +static inline void blkcipher_done_slow(struct blkcipher_walk *walk, + unsigned int bsize) { u8 *addr; addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = blkcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, 1); - return bsize; } -static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, - unsigned int n) +static inline void blkcipher_done_fast(struct blkcipher_walk *walk, + unsigned int n) { if (walk->flags & BLKCIPHER_WALK_COPY) { blkcipher_map_dst(walk); @@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - - return n; } int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err) { - unsigned int nbytes = 0; + 
unsigned int n; /* bytes processed */ + bool more; - if (likely(err >= 0)) { - unsigned int n = walk->nbytes - err; + if (unlikely(err < 0)) + goto finish; - if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) - n = blkcipher_done_fast(walk, n); - else if (WARN_ON(err)) { - err = -EINVAL; - goto err; - } else - n = blkcipher_done_slow(walk, n); + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - nbytes = walk->total - n; - err = 0; + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) { + blkcipher_done_fast(walk, n); + } else { + if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ + err = -EINVAL; + goto finish; + } + blkcipher_done_slow(walk, n); } - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); -err: - walk->total = nbytes; - walk->nbytes = nbytes; - - if (nbytes) { + if (more) { crypto_yield(desc->flags); return blkcipher_walk_next(desc, walk); } - + err = 0; +finish: + walk->nbytes = 0; if (walk->iv != desc->info) memcpy(desc->info, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); - return err; } EXPORT_SYMBOL_GPL(blkcipher_walk_done); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 11af5fd6a443570550e1dac5b0a429b2cae801b1..e319421a32e7de4fabadddf116f64eeea790a113 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) +static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; @@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) 
? 2 : 1); - return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { - unsigned int n = walk->nbytes - err; - unsigned int nbytes; + unsigned int n; /* bytes processed */ + bool more; + + if (unlikely(err < 0)) + goto finish; - nbytes = walk->total - n; + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - if (unlikely(err < 0)) { - nbytes = 0; - n = 0; - } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | - SKCIPHER_WALK_SLOW | - SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF)))) { + if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | + SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { unmap_src: skcipher_unmap_src(walk); } else if (walk->flags & SKCIPHER_WALK_DIFF) { @@ -131,28 +132,28 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err) skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ err = -EINVAL; - nbytes = 0; - } else - n = skcipher_done_slow(walk, n); + goto finish; + } + skcipher_done_slow(walk, n); + goto already_advanced; } - if (err > 0) - err = 0; - - walk->total = nbytes; - walk->nbytes = nbytes; - scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); +already_advanced: + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); - if (nbytes) { + if (more) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } + err = 0; +finish: + walk->nbytes = 0; /* Short-circuit for the common/fast path. */ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) @@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk) unsigned size; u8 *iv; - aligned_bs = ALIGN(bs, alignmask); + aligned_bs = ALIGN(bs, alignmask + 1); /* Minimum size to align buffer by alignmask. 
*/ size = alignmask & ~a; diff --git a/crypto/vmac.c b/crypto/vmac.c index df76a816cfb22f68ac173d3ef01e2e2f9e166c72..bb2fc787d61568d3c0a871f1fc2e04cfc0732437 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -1,6 +1,10 @@ /* - * Modified to interface to the Linux kernel + * VMAC: Message Authentication Code using Universal Hashing + * + * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 + * * Copyright (c) 2009, Intel Corporation. + * Copyright (c) 2018, Google Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,14 +20,15 @@ * Place - Suite 330, Boston, MA 02111-1307 USA. */ -/* -------------------------------------------------------------------------- - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Please send bug reports to the authors. - * Last modified: 17 APR 08, 1700 PDT - * ----------------------------------------------------------------------- */ +/* + * Derived from: + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. + * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Last modified: 17 APR 08, 1700 PDT + */ +#include #include #include #include @@ -31,9 +36,35 @@ #include #include #include -#include #include +/* + * User definable settings. 
+ */ +#define VMAC_TAG_LEN 64 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ + +/* per-transform (per-key) context */ +struct vmac_tfm_ctx { + struct crypto_cipher *cipher; + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; + u64 polykey[2*VMAC_TAG_LEN/64]; + u64 l3key[2*VMAC_TAG_LEN/64]; +}; + +/* per-request context */ +struct vmac_desc_ctx { + union { + u8 partial[VMAC_NHBYTES]; /* partial block */ + __le64 partial_words[VMAC_NHBYTES / 8]; + }; + unsigned int partial_size; /* size of the partial block */ + bool first_block_processed; + u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */ +}; + /* * Constants and masks */ @@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo, } while (0) #endif -static void vhash_abort(struct vmac_ctx *ctx) -{ - ctx->polytmp[0] = ctx->polykey[0] ; - ctx->polytmp[1] = ctx->polykey[1] ; - ctx->first_block_processed = 0; -} - static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) { u64 rh, rl, t, z = 0; @@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) return rl; } -static void vhash_update(const unsigned char *m, - unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ - struct vmac_ctx *ctx) +/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */ +static void vhash_blocks(const struct vmac_tfm_ctx *tctx, + struct vmac_desc_ctx *dctx, + const __le64 *mptr, unsigned int blocks) { - u64 rh, rl, *mptr; - const u64 *kptr = (u64 *)ctx->nhkey; - int i; - u64 ch, cl; - u64 pkh = ctx->polykey[0]; - u64 pkl = ctx->polykey[1]; - - if (!mbytes) - return; - - BUG_ON(mbytes % VMAC_NHBYTES); - - mptr = (u64 *)m; - i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ - - ch = ctx->polytmp[0]; - cl = ctx->polytmp[1]; - - if (!ctx->first_block_processed) { - ctx->first_block_processed = 1; + const u64 *kptr = tctx->nhkey; + const u64 pkh = tctx->polykey[0]; + const 
u64 pkl = tctx->polykey[1]; + u64 ch = dctx->polytmp[0]; + u64 cl = dctx->polytmp[1]; + u64 rh, rl; + + if (!dctx->first_block_processed) { + dctx->first_block_processed = true; nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; ADD128(ch, cl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); - i--; + blocks--; } - while (i--) { + while (blocks--) { nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; poly_step(ch, cl, pkh, pkl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); } - ctx->polytmp[0] = ch; - ctx->polytmp[1] = cl; + dctx->polytmp[0] = ch; + dctx->polytmp[1] = cl; } -static u64 vhash(unsigned char m[], unsigned int mbytes, - u64 *tagl, struct vmac_ctx *ctx) +static int vmac_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) { - u64 rh, rl, *mptr; - const u64 *kptr = (u64 *)ctx->nhkey; - int i, remaining; - u64 ch, cl; - u64 pkh = ctx->polykey[0]; - u64 pkl = ctx->polykey[1]; - - mptr = (u64 *)m; - i = mbytes / VMAC_NHBYTES; - remaining = mbytes % VMAC_NHBYTES; - - if (ctx->first_block_processed) { - ch = ctx->polytmp[0]; - cl = ctx->polytmp[1]; - } else if (i) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); - ch &= m62; - ADD128(ch, cl, pkh, pkl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - i--; - } else if (remaining) { - nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); - ch &= m62; - ADD128(ch, cl, pkh, pkl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - goto do_l3; - } else {/* Empty String */ - ch = pkh; cl = pkl; - goto do_l3; - } - - while (i--) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - } - if (remaining) { - nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - } - -do_l3: - vhash_abort(ctx); - remaining *= 8; - return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); -} + struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm); + __be64 out[2]; + u8 in[16] 
= { 0 }; + unsigned int i; + int err; -static u64 vmac(unsigned char m[], unsigned int mbytes, - const unsigned char n[16], u64 *tagl, - struct vmac_ctx_t *ctx) -{ - u64 *in_n, *out_p; - u64 p, h; - int i; - - in_n = ctx->__vmac_ctx.cached_nonce; - out_p = ctx->__vmac_ctx.cached_aes; - - i = n[15] & 1; - if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { - in_n[0] = *(u64 *)(n); - in_n[1] = *(u64 *)(n+8); - ((unsigned char *)in_n)[15] &= 0xFE; - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out_p, (unsigned char *)in_n); - - ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); + if (keylen != VMAC_KEY_LEN) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; } - p = be64_to_cpup(out_p + i); - h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); - return le64_to_cpu(p + h); -} -static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) -{ - u64 in[2] = {0}, out[2]; - unsigned i; - int err = 0; - - err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); + err = crypto_cipher_setkey(tctx->cipher, key, keylen); if (err) return err; /* Fill nh key */ - ((unsigned char *)in)[0] = 0x80; - for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); - ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); - ((unsigned char *)in)[15] += 1; + in[0] = 0x80; + for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) { + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->nhkey[i] = be64_to_cpu(out[0]); + tctx->nhkey[i+1] = be64_to_cpu(out[1]); + in[15]++; } /* Fill poly key */ - ((unsigned char *)in)[0] = 0xC0; - in[1] = 0; - for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.polytmp[i] = - ctx->__vmac_ctx.polykey[i] = - be64_to_cpup(out) & mpoly; - 
ctx->__vmac_ctx.polytmp[i+1] = - ctx->__vmac_ctx.polykey[i+1] = - be64_to_cpup(out+1) & mpoly; - ((unsigned char *)in)[15] += 1; + in[0] = 0xC0; + in[15] = 0; + for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) { + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly; + tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly; + in[15]++; } /* Fill ip key */ - ((unsigned char *)in)[0] = 0xE0; - in[1] = 0; - for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { + in[0] = 0xE0; + in[15] = 0; + for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) { do { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); - ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); - ((unsigned char *)in)[15] += 1; - } while (ctx->__vmac_ctx.l3key[i] >= p64 - || ctx->__vmac_ctx.l3key[i+1] >= p64); + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->l3key[i] = be64_to_cpu(out[0]); + tctx->l3key[i+1] = be64_to_cpu(out[1]); + in[15]++; + } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64); } - /* Invalidate nonce/aes cache and reset other elements */ - ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ - ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ - ctx->__vmac_ctx.first_block_processed = 0; - - return err; + return 0; } -static int vmac_setkey(struct crypto_shash *parent, - const u8 *key, unsigned int keylen) +static int vmac_init(struct shash_desc *desc) { - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - if (keylen != VMAC_KEY_LEN) { - crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return vmac_set_key((u8 *)key, ctx); -} - -static int vmac_init(struct shash_desc *pdesc) -{ + dctx->partial_size = 0; + dctx->first_block_processed = false; + 
memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp)); return 0; } -static int vmac_update(struct shash_desc *pdesc, const u8 *p, - unsigned int len) +static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - int expand; - int min; - - expand = VMAC_NHBYTES - ctx->partial_size > 0 ? - VMAC_NHBYTES - ctx->partial_size : 0; - - min = len < expand ? len : expand; - - memcpy(ctx->partial + ctx->partial_size, p, min); - ctx->partial_size += min; - - if (len < expand) - return 0; - - vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx); - ctx->partial_size = 0; - - len -= expand; - p += expand; + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); + unsigned int n; + + if (dctx->partial_size) { + n = min(len, VMAC_NHBYTES - dctx->partial_size); + memcpy(&dctx->partial[dctx->partial_size], p, n); + dctx->partial_size += n; + p += n; + len -= n; + if (dctx->partial_size == VMAC_NHBYTES) { + vhash_blocks(tctx, dctx, dctx->partial_words, 1); + dctx->partial_size = 0; + } + } - if (len % VMAC_NHBYTES) { - memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), - len % VMAC_NHBYTES); - ctx->partial_size = len % VMAC_NHBYTES; + if (len >= VMAC_NHBYTES) { + n = round_down(len, VMAC_NHBYTES); + /* TODO: 'p' may be misaligned here */ + vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); + p += n; + len -= n; } - vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx); + if (len) { + memcpy(dctx->partial, p, len); + dctx->partial_size = len; + } return 0; } -static int vmac_final(struct shash_desc *pdesc, u8 *out) +static u64 vhash_final(const struct vmac_tfm_ctx *tctx, + struct vmac_desc_ctx *dctx) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - vmac_t mac; - u8 nonce[16] = {}; - - /* vmac() ends up accessing outside the 
array bounds that - * we specify. In appears to access up to the next 2-word - * boundary. We'll just be uber cautious and zero the - * unwritten bytes in the buffer. - */ - if (ctx->partial_size) { - memset(ctx->partial + ctx->partial_size, 0, - VMAC_NHBYTES - ctx->partial_size); + unsigned int partial = dctx->partial_size; + u64 ch = dctx->polytmp[0]; + u64 cl = dctx->polytmp[1]; + + /* L1 and L2-hash the final block if needed */ + if (partial) { + /* Zero-pad to next 128-bit boundary */ + unsigned int n = round_up(partial, 16); + u64 rh, rl; + + memset(&dctx->partial[partial], 0, n - partial); + nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); + rh &= m62; + if (dctx->first_block_processed) + poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1], + rh, rl); + else + ADD128(ch, cl, rh, rl); } - mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); - memcpy(out, &mac, sizeof(vmac_t)); - memzero_explicit(&mac, sizeof(vmac_t)); - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); - ctx->partial_size = 0; + + /* L3-hash the 128-bit output of L2-hash */ + return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8); +} + +static int vmac_final(struct shash_desc *desc, u8 *out) +{ + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); + static const u8 nonce[16] = {}; /* TODO: this is insecure */ + union { + u8 bytes[16]; + __be64 pads[2]; + } block; + int index; + u64 hash, pad; + + /* Finish calculating the VHASH of the message */ + hash = vhash_final(tctx, dctx); + + /* Generate pseudorandom pad by encrypting the nonce */ + memcpy(&block, nonce, 16); + index = block.bytes[15] & 1; + block.bytes[15] &= ~1; + crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes); + pad = be64_to_cpu(block.pads[index]); + + /* The VMAC is the sum of VHASH and the pseudorandom pad */ + put_unaligned_le64(hash + pad, out); return 0; } static int vmac_init_tfm(struct crypto_tfm *tfm) { - 
struct crypto_cipher *cipher; - struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + struct crypto_cipher *cipher; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = cipher; + tctx->cipher = cipher; return 0; } static void vmac_exit_tfm(struct crypto_tfm *tfm) { - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(ctx->child); + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_cipher(tctx->cipher); } static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (IS_ERR(alg)) return PTR_ERR(alg); + err = -EINVAL; + if (alg->cra_blocksize != 16) + goto out_put_alg; + inst = shash_alloc_instance("vmac", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) @@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; - inst->alg.digestsize = sizeof(vmac_t); - inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); + inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); inst->alg.base.cra_init = vmac_init_tfm; inst->alg.base.cra_exit = vmac_exit_tfm; + inst->alg.descsize = sizeof(struct vmac_desc_ctx); + inst->alg.digestsize = VMAC_TAG_LEN / 8; inst->alg.init = vmac_init; inst->alg.update = vmac_update; inst->alg.final = vmac_final; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 572b6c7303edce1aa6f097c310bad0aa3b218661..f14695e744d0313dfba260371a07944bf2f18da6 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -114,19 +114,7 @@ static DEFINE_MUTEX(ghes_list_mutex); * from BIOS to Linux can be 
determined only in NMI, IRQ or timer * handler, but general ioremap can not be used in atomic context, so * the fixmap is used instead. - */ - -/* - * Two virtual pages are used, one for IRQ/PROCESS context, the other for - * NMI context (optionally). - */ -#define GHES_IOREMAP_PAGES 2 -#define GHES_IOREMAP_IRQ_PAGE(base) (base) -#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE) - -/* virtual memory area for atomic ioremap */ -static struct vm_struct *ghes_ioremap_area; -/* + * * These 2 spinlocks are used to prevent the fixmap entries from being used * simultaneously. */ @@ -141,23 +129,6 @@ static atomic_t ghes_estatus_cache_alloced; static int ghes_panic_timeout __read_mostly = 30; -static int ghes_ioremap_init(void) -{ - ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, - VM_IOREMAP, VMALLOC_START, VMALLOC_END); - if (!ghes_ioremap_area) { - pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); - return -ENOMEM; - } - - return 0; -} - -static void ghes_ioremap_exit(void) -{ - free_vm_area(ghes_ioremap_area); -} - static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) { phys_addr_t paddr; @@ -1247,13 +1218,9 @@ static int __init ghes_init(void) ghes_nmi_init_cxt(); - rc = ghes_ioremap_init(); - if (rc) - goto err; - rc = ghes_estatus_pool_init(); if (rc) - goto err_ioremap_exit; + goto err; rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX); @@ -1277,8 +1244,6 @@ static int __init ghes_init(void) return 0; err_pool_exit: ghes_estatus_pool_exit(); -err_ioremap_exit: - ghes_ioremap_exit(); err: return rc; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2ef0ad6a33d6c692da5465a817ebfde87620c8c6..7a0af16f86f204f60a9fc4202a34f2853a91518f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -338,6 +338,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), }, }, + { + .callback = 
init_nvs_save_s3, + .ident = "Asus 1025C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1025C"), + }, + }, /* * https://bugzilla.kernel.org/show_bug.cgi?id=189431 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index ff58c8efa1efaf86e704688402a497eb10ae1c3a..1505da863ea42b61d54c319770f63c359e51319c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -618,16 +618,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); +static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, &dev_attr_spectre_v1.attr, &dev_attr_spectre_v2.attr, &dev_attr_spec_store_bypass.attr, + &dev_attr_l1tf.attr, NULL }; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 429ca8ed7e518087bc1ddaebd9f4c8393eb5179e..1132d17f48d79032777e2b7d492524c9726a07c0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -151,6 +151,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, ~d->mask_buf[i]); else ret = regmap_write(map, reg, d->mask_buf[i]); + /* some chips needs to clear ack reg after ack */ + if (d->chip->clear_ack) + ret = regmap_write(map, reg, 0x0); if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", reg, ret); @@ -361,6 +364,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) reg = chip->ack_base 
+ (i * map->reg_stride * data->irq_reg_stride); ret = regmap_write(map, reg, data->status_buf[i]); + /* some chips needs to clear ack reg after ack */ + if (chip->clear_ack) + ret = regmap_write(map, reg, 0x0); if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", reg, ret); @@ -569,6 +575,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, else ret = regmap_write(map, reg, d->status_buf[i] & d->mask_buf[i]); + /* some chips needs to clear ack reg after ack */ + if (chip->clear_ack) + ret = regmap_write(map, reg, 0x0); if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", reg, ret); diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index ac3a31d433b2e9f698d051bfcd06f76d0efad971..6620c7972e90c243cc6c1cbd80069d0f3773d766 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -15,6 +15,20 @@ config ZRAM See zram.txt for more information. +config ZRAM_DEDUP + bool "Deduplication support for ZRAM data" + depends on ZRAM + default n + help + Deduplicate ZRAM data to reduce amount of memory consumption. + Advantage largely depends on the workload. In some cases, this + option reduces memory usage to the half. However, if there is no + duplicated data, the amount of memory consumption would be + increased due to additional metadata usage. And, there is + computation time trade-off. Please check the benefit before + enabling this option. Experiment shows the positive effect when + the zram is used as blockdev and is used to store build output. 
+ config ZRAM_WRITEBACK bool "Write back incompressible page to backing device" depends on ZRAM diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile index 9e2b79e9a990b59ecc7868dd78ae1cc5cd13a595..d7204ef6ee53e1c34b1bc73f46848633b3aacab6 100644 --- a/drivers/block/zram/Makefile +++ b/drivers/block/zram/Makefile @@ -1,3 +1,4 @@ -zram-y := zcomp.o zram_drv.o +zram-y := zcomp.o zram_drv.o +zram-$(CONFIG_ZRAM_DEDUP) += zram_dedup.o obj-$(CONFIG_ZRAM) += zram.o diff --git a/drivers/block/zram/zram_dedup.c b/drivers/block/zram/zram_dedup.c new file mode 100644 index 0000000000000000000000000000000000000000..14c4988f8ff73c122bd0fb727e260a5077c1035f --- /dev/null +++ b/drivers/block/zram/zram_dedup.c @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2017 Joonsoo Kim. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "zram_drv.h" + +/* One slot will contain 128 pages theoretically */ +#define ZRAM_HASH_SHIFT 7 +#define ZRAM_HASH_SIZE_MIN (1 << 10) +#define ZRAM_HASH_SIZE_MAX (1 << 31) + +u64 zram_dedup_dup_size(struct zram *zram) +{ + return (u64)atomic64_read(&zram->stats.dup_data_size); +} + +u64 zram_dedup_meta_size(struct zram *zram) +{ + return (u64)atomic64_read(&zram->stats.meta_data_size); +} + +static u32 zram_dedup_checksum(unsigned char *mem) +{ + return jhash(mem, PAGE_SIZE, 0); +} + +void zram_dedup_insert(struct zram *zram, struct zram_entry *new, + u32 checksum) +{ + struct zram_hash *hash; + struct rb_root *rb_root; + struct rb_node **rb_node, *parent = NULL; + struct zram_entry *entry; + + if (!zram_dedup_enabled(zram)) + return; + + new->checksum = checksum; + hash = &zram->hash[checksum % zram->hash_size]; + rb_root = &hash->rb_root; + + spin_lock(&hash->lock); + rb_node = &rb_root->rb_node; + while (*rb_node) { + parent = *rb_node; + entry = rb_entry(parent, struct zram_entry, rb_node); + if (checksum < entry->checksum) + rb_node = &parent->rb_left; + else if (checksum > entry->checksum) + rb_node = &parent->rb_right; + else + rb_node = &parent->rb_left; + } + + rb_link_node(&new->rb_node, parent, rb_node); + rb_insert_color(&new->rb_node, rb_root); + spin_unlock(&hash->lock); +} + +static bool zram_dedup_match(struct zram *zram, struct zram_entry *entry, + unsigned char *mem) +{ + bool match = false; + unsigned char *cmem; + struct zcomp_strm *zstrm; + + cmem = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO); + if (entry->len == PAGE_SIZE) { + match = !memcmp(mem, cmem, PAGE_SIZE); + } else { + zstrm = zcomp_stream_get(zram->comp); + if (!zcomp_decompress(zstrm, cmem, entry->len, zstrm->buffer)) + match = !memcmp(mem, zstrm->buffer, PAGE_SIZE); + zcomp_stream_put(zram->comp); + } + zs_unmap_object(zram->mem_pool, entry->handle); + + return match; +} + +static unsigned long zram_dedup_put(struct zram 
*zram, + struct zram_entry *entry) +{ + struct zram_hash *hash; + u32 checksum; + + checksum = entry->checksum; + hash = &zram->hash[checksum % zram->hash_size]; + + spin_lock(&hash->lock); + + entry->refcount--; + if (!entry->refcount) + rb_erase(&entry->rb_node, &hash->rb_root); + else + atomic64_sub(entry->len, &zram->stats.dup_data_size); + + spin_unlock(&hash->lock); + + return entry->refcount; +} + +static struct zram_entry *__zram_dedup_get(struct zram *zram, + struct zram_hash *hash, unsigned char *mem, + struct zram_entry *entry) +{ + struct zram_entry *tmp, *prev = NULL; + struct rb_node *rb_node; + + /* find left-most entry with same checksum */ + while ((rb_node = rb_prev(&entry->rb_node))) { + tmp = rb_entry(rb_node, struct zram_entry, rb_node); + if (tmp->checksum != entry->checksum) + break; + + entry = tmp; + } + +again: + entry->refcount++; + atomic64_add(entry->len, &zram->stats.dup_data_size); + spin_unlock(&hash->lock); + + if (prev) + zram_entry_free(zram, prev); + + if (zram_dedup_match(zram, entry, mem)) + return entry; + + spin_lock(&hash->lock); + tmp = NULL; + rb_node = rb_next(&entry->rb_node); + if (rb_node) + tmp = rb_entry(rb_node, struct zram_entry, rb_node); + + if (tmp && (tmp->checksum == entry->checksum)) { + prev = entry; + entry = tmp; + goto again; + } + + spin_unlock(&hash->lock); + zram_entry_free(zram, entry); + + return NULL; +} + +static struct zram_entry *zram_dedup_get(struct zram *zram, + unsigned char *mem, u32 checksum) +{ + struct zram_hash *hash; + struct zram_entry *entry; + struct rb_node *rb_node; + + hash = &zram->hash[checksum % zram->hash_size]; + + spin_lock(&hash->lock); + rb_node = hash->rb_root.rb_node; + while (rb_node) { + entry = rb_entry(rb_node, struct zram_entry, rb_node); + if (checksum == entry->checksum) + return __zram_dedup_get(zram, hash, mem, entry); + + if (checksum < entry->checksum) + rb_node = rb_node->rb_left; + else + rb_node = rb_node->rb_right; + } + spin_unlock(&hash->lock); + + 
return NULL; +} + +struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page, + u32 *checksum) +{ + void *mem; + struct zram_entry *entry; + + if (!zram_dedup_enabled(zram)) + return NULL; + + mem = kmap_atomic(page); + *checksum = zram_dedup_checksum(mem); + + entry = zram_dedup_get(zram, mem, *checksum); + kunmap_atomic(mem); + + return entry; +} + +void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry, + unsigned long handle, unsigned int len) +{ + if (!zram_dedup_enabled(zram)) + return; + + entry->handle = handle; + entry->refcount = 1; + entry->len = len; +} + +bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry) +{ + if (!zram_dedup_enabled(zram)) + return true; + + if (zram_dedup_put(zram, entry)) + return false; + + return true; +} + +int zram_dedup_init(struct zram *zram, size_t num_pages) +{ + int i; + struct zram_hash *hash; + + if (!zram_dedup_enabled(zram)) + return 0; + + zram->hash_size = num_pages >> ZRAM_HASH_SHIFT; + zram->hash_size = min_t(size_t, ZRAM_HASH_SIZE_MAX, zram->hash_size); + zram->hash_size = max_t(size_t, ZRAM_HASH_SIZE_MIN, zram->hash_size); + zram->hash = vzalloc(zram->hash_size * sizeof(struct zram_hash)); + if (!zram->hash) { + pr_err("Error allocating zram entry hash\n"); + return -ENOMEM; + } + + for (i = 0; i < zram->hash_size; i++) { + hash = &zram->hash[i]; + spin_lock_init(&hash->lock); + hash->rb_root = RB_ROOT; + } + + return 0; +} + +void zram_dedup_fini(struct zram *zram) +{ + vfree(zram->hash); + zram->hash = NULL; + zram->hash_size = 0; +} diff --git a/drivers/block/zram/zram_dedup.h b/drivers/block/zram/zram_dedup.h new file mode 100644 index 0000000000000000000000000000000000000000..8ab267b0b9567638baac86630a1062ddcae37aa0 --- /dev/null +++ b/drivers/block/zram/zram_dedup.h @@ -0,0 +1,45 @@ +#ifndef _ZRAM_DEDUP_H_ +#define _ZRAM_DEDUP_H_ + +struct zram; +struct zram_entry; + +#ifdef CONFIG_ZRAM_DEDUP + +u64 zram_dedup_dup_size(struct zram *zram); +u64 
zram_dedup_meta_size(struct zram *zram); + +void zram_dedup_insert(struct zram *zram, struct zram_entry *new, + u32 checksum); +struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page, + u32 *checksum); + +void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry, + unsigned long handle, unsigned int len); +bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry); + +int zram_dedup_init(struct zram *zram, size_t num_pages); +void zram_dedup_fini(struct zram *zram); +#else + +static inline u64 zram_dedup_dup_size(struct zram *zram) { return 0; } +static inline u64 zram_dedup_meta_size(struct zram *zram) { return 0; } + +static inline void zram_dedup_insert(struct zram *zram, struct zram_entry *new, + u32 checksum) { } +static inline struct zram_entry *zram_dedup_find(struct zram *zram, + struct page *page, u32 *checksum) { return NULL; } + +static inline void zram_dedup_init_entry(struct zram *zram, + struct zram_entry *entry, unsigned long handle, + unsigned int len) { } +static inline bool zram_dedup_put_entry(struct zram *zram, + struct zram_entry *entry) { return true; } + +static inline int zram_dedup_init(struct zram *zram, + size_t num_pages) { return 0; } +static inline void zram_dedup_fini(struct zram *zram) { } + +#endif + +#endif /* _ZRAM_DEDUP_H_ */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 036c9a37b2269b34e64dc3bb58b123855538897d..2e35633ac188d401ffb38ea8d40787c309782cab 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -57,14 +57,15 @@ static inline struct zram *dev_to_zram(struct device *dev) return (struct zram *)dev_to_disk(dev)->private_data; } -static unsigned long zram_get_handle(struct zram *zram, u32 index) +static struct zram_entry *zram_get_entry(struct zram *zram, u32 index) { - return zram->table[index].handle; + return zram->table[index].entry; } -static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) 
+static void zram_set_entry(struct zram *zram, u32 index, + struct zram_entry *entry) { - zram->table[index].handle = handle; + zram->table[index].entry = entry; } /* flag operations require table entry bit_spin_lock() being held */ @@ -658,6 +659,41 @@ static ssize_t comp_algorithm_store(struct device *dev, return len; } +static ssize_t use_dedup_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + bool val; + struct zram *zram = dev_to_zram(dev); + + down_read(&zram->init_lock); + val = zram->use_dedup; + up_read(&zram->init_lock); + + return scnprintf(buf, PAGE_SIZE, "%d\n", (int)val); +} + +#ifdef CONFIG_ZRAM_DEDUP +static ssize_t use_dedup_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + int val; + struct zram *zram = dev_to_zram(dev); + + if (kstrtoint(buf, 10, &val) || (val != 0 && val != 1)) + return -EINVAL; + + down_write(&zram->init_lock); + if (init_done(zram)) { + up_write(&zram->init_lock); + pr_info("Can't change dedup usage for initialized device\n"); + return -EBUSY; + } + zram->use_dedup = val; + up_write(&zram->init_lock); + return len; +} +#endif + static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -714,14 +750,16 @@ static ssize_t mm_stat_show(struct device *dev, max_used = atomic_long_read(&zram->stats.max_used_pages); ret = scnprintf(buf, PAGE_SIZE, - "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n", + "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n", orig_size << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.compr_data_size), mem_used << PAGE_SHIFT, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.same_pages), - pool_stats.pages_compacted); + pool_stats.pages_compacted, + zram_dedup_dup_size(zram), + zram_dedup_meta_size(zram)); up_read(&zram->init_lock); return ret; @@ -748,6 +786,15 @@ static DEVICE_ATTR_RO(io_stat); static DEVICE_ATTR_RO(mm_stat); static 
DEVICE_ATTR_RO(debug_stat); +static unsigned long zram_entry_handle(struct zram *zram, + struct zram_entry *entry) +{ + if (zram_dedup_enabled(zram)) + return entry->handle; + else + return (unsigned long)entry; +} + static void zram_slot_lock(struct zram *zram, u32 index) { bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value); @@ -758,6 +805,47 @@ static void zram_slot_unlock(struct zram *zram, u32 index) bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); } +static struct zram_entry *zram_entry_alloc(struct zram *zram, + unsigned int len, gfp_t flags) +{ + struct zram_entry *entry; + unsigned long handle; + + handle = zs_malloc(zram->mem_pool, len, flags); + if (!handle) + return NULL; + + if (!zram_dedup_enabled(zram)) + return (struct zram_entry *)handle; + + entry = kzalloc(sizeof(*entry), + flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA)); + if (!entry) { + zs_free(zram->mem_pool, handle); + return NULL; + } + + zram_dedup_init_entry(zram, entry, handle, len); + atomic64_add(sizeof(*entry), &zram->stats.meta_data_size); + + return entry; +} + +void zram_entry_free(struct zram *zram, struct zram_entry *entry) +{ + if (!zram_dedup_put_entry(zram, entry)) + return; + + zs_free(zram->mem_pool, zram_entry_handle(zram, entry)); + + if (!zram_dedup_enabled(zram)) + return; + + kfree(entry); + + atomic64_sub(sizeof(*entry), &zram->stats.meta_data_size); +} + static void zram_meta_free(struct zram *zram, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; @@ -768,6 +856,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize) zram_free_page(zram, index); zs_destroy_pool(zram->mem_pool); + zram_dedup_fini(zram); vfree(zram->table); } @@ -786,6 +875,12 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) return false; } + if (zram_dedup_init(zram, num_pages)) { + vfree(zram->table); + zs_destroy_pool(zram->mem_pool); + return false; + } + return true; } @@ -796,7 +891,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) */ 
static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle; + struct zram_entry *entry; if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) { zram_wb_clear(zram, index); @@ -816,17 +911,17 @@ static void zram_free_page(struct zram *zram, size_t index) return; } - handle = zram_get_handle(zram, index); - if (!handle) + entry = zram_get_entry(zram, index); + if (!entry) return; - zs_free(zram->mem_pool, handle); + zram_entry_free(zram, entry); atomic64_sub(zram_get_obj_size(zram, index), &zram->stats.compr_data_size); atomic64_dec(&zram->stats.pages_stored); - zram_set_handle(zram, index, 0); + zram_set_entry(zram, index, NULL); zram_set_obj_size(zram, index, 0); } @@ -834,7 +929,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, struct bio *bio, bool partial_io) { int ret; - unsigned long handle; + struct zram_entry *entry; unsigned int size; void *src, *dst; @@ -856,12 +951,12 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, } zram_slot_lock(zram, index); - handle = zram_get_handle(zram, index); - if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { + entry = zram_get_entry(zram, index); + if (!entry || zram_test_flag(zram, index, ZRAM_SAME)) { unsigned long value; void *mem; - value = handle ? zram_get_element(zram, index) : 0; + value = entry ? 
zram_get_element(zram, index) : 0; mem = kmap_atomic(page); zram_fill_page(mem, PAGE_SIZE, value); kunmap_atomic(mem); @@ -871,7 +966,8 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, size = zram_get_obj_size(zram, index); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + src = zs_map_object(zram->mem_pool, + zram_entry_handle(zram, entry), ZS_MM_RO); if (size == PAGE_SIZE) { dst = kmap_atomic(page); memcpy(dst, src, PAGE_SIZE); @@ -885,7 +981,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, kunmap_atomic(dst); zcomp_stream_put(zram->comp); } - zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry)); zram_slot_unlock(zram, index); /* Should NEVER happen. Return bio error if it does. */ @@ -933,11 +1029,12 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, { int ret = 0; unsigned long alloced_pages; - unsigned long handle = 0; + struct zram_entry *entry = NULL; unsigned int comp_len = 0; void *src, *dst, *mem; struct zcomp_strm *zstrm; struct page *page = bvec->bv_page; + u32 checksum; unsigned long element = 0; enum zram_pageflags flags = 0; bool allow_wb = true; @@ -952,6 +1049,12 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, } kunmap_atomic(mem); + entry = zram_dedup_find(zram, page, &checksum); + if (entry) { + comp_len = entry->len; + goto out; + } + compress_again: zstrm = zcomp_stream_get(zram->comp); src = kmap_atomic(page); @@ -961,7 +1064,8 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, if (unlikely(ret)) { zcomp_stream_put(zram->comp); pr_err("Compression failed! 
err=%d\n", ret); - zs_free(zram->mem_pool, handle); + if (entry) + zram_entry_free(zram, entry); return ret; } @@ -981,32 +1085,32 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, } /* - * handle allocation has 2 paths: + * entry allocation has 2 paths: * a) fast path is executed with preemption disabled (for * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, * since we can't sleep; * b) slow path enables preemption and attempts to allocate * the page with __GFP_DIRECT_RECLAIM bit set. we have to * put per-cpu compression stream and, thus, to re-do - * the compression once handle is allocated. + * the compression once entry is allocated. * - * if we have a 'non-null' handle here then we are coming - * from the slow path and handle has already been allocated. + * if we have a 'non-null' entry here then we are coming + * from the slow path and entry has already been allocated. */ - if (!handle) - handle = zs_malloc(zram->mem_pool, comp_len, + if (!entry) + entry = zram_entry_alloc(zram, comp_len, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN | __GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA); - if (!handle) { + if (!entry) { zcomp_stream_put(zram->comp); atomic64_inc(&zram->stats.writestall); - handle = zs_malloc(zram->mem_pool, comp_len, + entry = zram_entry_alloc(zram, comp_len, GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA); - if (handle) + if (entry) goto compress_again; return -ENOMEM; } @@ -1016,11 +1120,12 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, if (zram->limit_pages && alloced_pages > zram->limit_pages) { zcomp_stream_put(zram->comp); - zs_free(zram->mem_pool, handle); + zram_entry_free(zram, entry); return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); + dst = zs_map_object(zram->mem_pool, + zram_entry_handle(zram, entry), ZS_MM_WO); src = zstrm->buffer; if (comp_len == PAGE_SIZE) @@ -1030,8 +1135,9 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, 
kunmap_atomic(src); zcomp_stream_put(zram->comp); - zs_unmap_object(zram->mem_pool, handle); + zs_unmap_object(zram->mem_pool, zram_entry_handle(zram, entry)); atomic64_add(comp_len, &zram->stats.compr_data_size); + zram_dedup_insert(zram, entry, checksum); out: /* * Free memory associated with this sector @@ -1044,7 +1150,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, zram_set_flag(zram, index, flags); zram_set_element(zram, index, element); } else { - zram_set_handle(zram, index, handle); + zram_set_entry(zram, index, entry); zram_set_obj_size(zram, index, comp_len); } zram_slot_unlock(zram, index); @@ -1457,6 +1563,11 @@ static DEVICE_ATTR_RW(comp_algorithm); #ifdef CONFIG_ZRAM_WRITEBACK static DEVICE_ATTR_RW(backing_dev); #endif +#ifdef CONFIG_ZRAM_DEDUP +static DEVICE_ATTR_RW(use_dedup); +#else +static DEVICE_ATTR_RO(use_dedup); +#endif static struct attribute *zram_disk_attrs[] = { &dev_attr_disksize.attr, @@ -1470,6 +1581,7 @@ static struct attribute *zram_disk_attrs[] = { #ifdef CONFIG_ZRAM_WRITEBACK &dev_attr_backing_dev.attr, #endif + &dev_attr_use_dedup.attr, &dev_attr_io_stat.attr, &dev_attr_mm_stat.attr, &dev_attr_debug_stat.attr, diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 31762db861e38486a86c9ea060ec841f90f142df..52dcd39e7b073837cb47f2bc3cb88beb660519df 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -18,8 +18,10 @@ #include #include #include +#include #include "zcomp.h" +#include "zram_dedup.h" /*-- Configurable parameters */ @@ -70,10 +72,18 @@ enum zram_pageflags { /*-- Data structures */ +struct zram_entry { + struct rb_node rb_node; + u32 len; + u32 checksum; + unsigned long refcount; + unsigned long handle; +}; + /* Allocated for each disk page */ struct zram_table_entry { union { - unsigned long handle; + struct zram_entry *entry; unsigned long element; }; unsigned long value; @@ -91,6 +101,16 @@ struct zram_stats { atomic64_t pages_stored; /* 
no. of pages currently stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */ atomic64_t writestall; /* no. of write slow paths */ + atomic64_t dup_data_size; /* + * compressed size of pages + * duplicated + */ + atomic64_t meta_data_size; /* size of zram_entries */ +}; + +struct zram_hash { + spinlock_t lock; + struct rb_root rb_root; }; struct zram { @@ -98,6 +118,8 @@ struct zram { struct zs_pool *mem_pool; struct zcomp *comp; struct gendisk *disk; + struct zram_hash *hash; + size_t hash_size; /* Prevent concurrent execution of device init */ struct rw_semaphore init_lock; /* @@ -116,6 +138,7 @@ struct zram { * zram is claimed so open request will be failed */ bool claim; /* Protected by bdev->bd_mutex */ + bool use_dedup; #ifdef CONFIG_ZRAM_WRITEBACK struct file *backing_dev; struct block_device *bdev; @@ -125,4 +148,15 @@ struct zram { spinlock_t bitmap_lock; #endif }; + +static inline bool zram_dedup_enabled(struct zram *zram) +{ +#ifdef CONFIG_ZRAM_DEDUP + return zram->use_dedup; +#else + return false; +#endif +} + +void zram_entry_free(struct zram *zram, struct zram_entry *entry); #endif diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 6aef3bde10d70e3cfa3643869c3e38c45d9faf25..c823914b3a80a0ce550dff8244df835b23ec66ec 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -115,12 +115,12 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) struct sk_buff *skb = hu->tx_skb; if (!skb) { - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) skb = hu->proto->dequeue(hu); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); } else { hu->tx_skb = NULL; } @@ -130,7 +130,14 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) int hci_uart_tx_wakeup(struct hci_uart *hu) { - read_lock(&hu->proto_lock); + /* This may be called in an IRQ context, so we can't sleep. 
Therefore + * we try to acquire the lock only, and if that fails we assume the + * tty is being closed because that is the only time the write lock is + * acquired. If, however, at some point in the future the write lock + * is also acquired in other situations, then this must be revisited. + */ + if (!percpu_down_read_trylock(&hu->proto_lock)) + return 0; if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) goto no_schedule; @@ -145,7 +152,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) schedule_work(&hu->write_work); no_schedule: - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return 0; } @@ -247,12 +254,12 @@ static int hci_uart_flush(struct hci_dev *hdev) tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hu->proto->flush(hu); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return 0; } @@ -275,15 +282,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return -EUNATCH; } hu->proto->enqueue(hu, skb); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); hci_uart_tx_wakeup(hu); @@ -486,7 +493,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - rwlock_init(&hu->proto_lock); + percpu_init_rwsem(&hu->proto_lock); /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); @@ -503,7 +510,6 @@ static void hci_uart_tty_close(struct tty_struct *tty) { struct hci_uart *hu = tty->disc_data; struct hci_dev *hdev; - unsigned long flags; BT_DBG("tty %p", tty); @@ -518,9 +524,9 @@ static void 
hci_uart_tty_close(struct tty_struct *tty) hci_uart_close(hdev); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - write_lock_irqsave(&hu->proto_lock, flags); + percpu_down_write(&hu->proto_lock); clear_bit(HCI_UART_PROTO_READY, &hu->flags); - write_unlock_irqrestore(&hu->proto_lock, flags); + percpu_up_write(&hu->proto_lock); cancel_work_sync(&hu->write_work); @@ -582,10 +588,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, if (!hu || tty != hu->tty) return; - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return; } @@ -593,7 +599,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, * tty caller */ hu->proto->recv(hu, data, count); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); if (hu->hdev) hu->hdev->stat.byte_rx += count; diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index b725ac4f7ff67dfaab9745668e9839d0a0783de5..52e6d4d1608e3265a1aa68ae8ed1b41fcc9bb49b 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -304,6 +304,7 @@ int hci_uart_register_device(struct hci_uart *hu, hci_set_drvdata(hdev, hu); INIT_WORK(&hu->write_work, hci_uart_write_work); + percpu_init_rwsem(&hu->proto_lock); /* Only when vendor specific setup callback is provided, consider * the manufacturer information valid. 
This avoids filling in the diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index d9cd95d81149b1f9dc43506edcd02b9a86b03526..66e8c68e4607d72e462ea0c093dcefd6fb4636d5 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -87,7 +87,7 @@ struct hci_uart { struct work_struct write_work; const struct hci_uart_proto *proto; - rwlock_t proto_lock; /* Stop work for proto close */ + struct percpu_rw_semaphore proto_lock; /* Stop work for proto close */ void *priv; struct sk_buff *tx_skb; diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c index f6ef83ff7ce084cb805d2fd0aa2dbe619c541196..226dc0dcd931b02509f92407647577d370b4c6a0 100644 --- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c @@ -502,13 +502,15 @@ int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful) arch_info->pcie_state = pci_store_saved_state(pci_dev); pci_disable_device(pci_dev); - ret = pci_set_power_state(pci_dev, PCI_D3hot); - if (ret) { - MHI_ERR("Failed to set D3hot, ret:%d\n", ret); - return ret; - } } + /* + * We will always attempt to put link into D3hot, however + * link down may have happened due to error fatal, so + * ignoring the return code + */ + pci_set_power_state(pci_dev, PCI_D3hot); + /* release the resources */ msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, NULL, 0); mhi_arch_set_bus_request(mhi_cntrl, 0); diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c index 32060a9f96fc56d9c712fb9c8867a9cba2f2d590..5a4906c31fa9673511092ddaaf829e12b0324f4e 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_qcom.c @@ -164,11 +164,18 @@ static int mhi_runtime_suspend(struct device *dev) { int ret = 0; struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); MHI_LOG("Enter\n"); 
mutex_lock(&mhi_cntrl->pm_mutex); + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + ret = mhi_pm_suspend(mhi_cntrl); if (ret) { MHI_LOG("Abort due to ret:%d\n", ret); diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h index 30333c3fb38533f38a72beaac7824156ddb15f54..7a742b8df5a656669f664a63c14fecc0fc78f36f 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.h +++ b/drivers/bus/mhi/controllers/mhi_qcom.h @@ -21,7 +21,9 @@ #define MHI_PCIE_VENDOR_ID (0x17cb) #define MHI_PCIE_DEBUG_ID (0xffff) -#define MHI_RPM_SUSPEND_TMR_MS (1000) + +/* runtime suspend timer */ +#define MHI_RPM_SUSPEND_TMR_MS (250) #define MHI_PCI_BAR_NUM (0) extern const char * const mhi_ee_str[MHI_EE_MAX]; diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c index 132fb705323f7bbf26c8299d575eb9ad6834b5f4..569061c06ad9b2a8d42568546c51876c316c4d92 100644 --- a/drivers/bus/mhi/core/mhi_pm.c +++ b/drivers/bus/mhi/core/mhi_pm.c @@ -1042,7 +1042,7 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) read_unlock_bh(&mhi_cntrl->pm_lock); ret = wait_event_timeout(mhi_cntrl->state_event, - mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->pm_state == MHI_PM_M0 || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), msecs_to_jiffies(mhi_cntrl->timeout_ms)); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index b16ad9b705c45446a749e38e2cc7a3e3e73b5e7f..00fc37464eb8d541cf763a943298e59f85e789d0 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -272,6 +272,7 @@ struct fastrpc_channel_ctx { struct completion workport; struct notifier_block nb; struct mutex smd_mutex; + struct mutex rpmsg_mutex; int sesscount; int ssrcount; void *handle; @@ -759,7 +760,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, struct fastrpc_session_ctx *sess; struct fastrpc_apps *apps = fl->apps; int cid = fl->cid; - struct 
fastrpc_channel_ctx *chan = &apps->channel[cid]; + struct fastrpc_channel_ctx *chan = NULL; struct fastrpc_mmap *map = NULL; dma_addr_t region_phys = 0; void *region_vaddr = NULL; @@ -767,6 +768,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, int err = 0, vmid, sgl_index = 0; struct scatterlist *sgl = NULL; + VERIFY(err, cid >= 0 && cid < NUM_CHANNELS); + if (err) + goto bail; + chan = &apps->channel[cid]; + if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap)) return 0; map = kzalloc(sizeof(*map), GFP_KERNEL); @@ -790,7 +796,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, goto bail; map->phys = (uintptr_t)region_phys; map->size = len; - map->va = 0; + map->va = (uintptr_t)region_vaddr; } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) { VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd))); if (err) @@ -1781,16 +1787,21 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, if (fl->ssrcount != channel_ctx->ssrcount) { err = -ECONNRESET; + mutex_unlock(&channel_ctx->smd_mutex); goto bail; } + mutex_unlock(&channel_ctx->smd_mutex); + + mutex_lock(&channel_ctx->rpmsg_mutex); VERIFY(err, !IS_ERR_OR_NULL(channel_ctx->rpdev)); if (err) { err = -ECONNRESET; + mutex_unlock(&channel_ctx->rpmsg_mutex); goto bail; } err = rpmsg_send(channel_ctx->rpdev->ept, (void *)msg, sizeof(*msg)); + mutex_unlock(&channel_ctx->rpmsg_mutex); bail: - mutex_unlock(&channel_ctx->smd_mutex); return err; } @@ -1810,6 +1821,7 @@ static void fastrpc_init(struct fastrpc_apps *me) /* All channels are secure by default except CDSP */ me->channel[i].secure = SECURE_CHANNEL; mutex_init(&me->channel[i].smd_mutex); + mutex_init(&me->channel[i].rpmsg_mutex); } /* Set CDSP channel to non secure */ me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL; @@ -2035,7 +2047,6 @@ static int fastrpc_init_process(struct fastrpc_file *fl, DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_FORCE_NON_COHERENT; err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem); 
- imem->virt = NULL; if (err) goto bail; fl->init_mem = imem; @@ -2577,14 +2588,14 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl, 1, &rbuf); if (err) goto bail; - rbuf->virt = NULL; - err = fastrpc_mmap_on_dsp(fl, ud->flags, - (uintptr_t)rbuf->virt, + err = fastrpc_mmap_on_dsp(fl, ud->flags, 0, rbuf->phys, rbuf->size, &raddr); if (err) goto bail; rbuf->raddr = raddr; } else { + uintptr_t va_to_dsp; + mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0, (uintptr_t)ud->vaddrin, ud->size, @@ -2592,7 +2603,13 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl, mutex_unlock(&fl->map_mutex); if (err) goto bail; - VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map->va, + + if (ud->flags == ADSP_MMAP_HEAP_ADDR || + ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) + va_to_dsp = 0; + else + va_to_dsp = (uintptr_t)map->va; + VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp, map->phys, map->size, &raddr)); if (err) goto bail; @@ -2667,9 +2684,9 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) VERIFY(err, cid >= 0 && cid < NUM_CHANNELS); if (err) goto bail; - mutex_lock(&gcinfo[cid].smd_mutex); + mutex_lock(&gcinfo[cid].rpmsg_mutex); gcinfo[cid].rpdev = rpdev; - mutex_unlock(&gcinfo[cid].smd_mutex); + mutex_unlock(&gcinfo[cid].rpmsg_mutex); pr_info("adsprpc: %s: opened rpmsg channel for %s\n", __func__, gcinfo[cid].subsys); bail: @@ -2701,9 +2718,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) VERIFY(err, cid >= 0 && cid < NUM_CHANNELS); if (err) goto bail; - mutex_lock(&gcinfo[cid].smd_mutex); + mutex_lock(&gcinfo[cid].rpmsg_mutex); gcinfo[cid].rpdev = NULL; - mutex_unlock(&gcinfo[cid].smd_mutex); + mutex_unlock(&gcinfo[cid].rpmsg_mutex); fastrpc_notify_drivers(me, cid); pr_info("adsprpc: %s: closed rpmsg channel of %s\n", __func__, gcinfo[cid].subsys); @@ -2772,13 +2789,16 @@ static int fastrpc_file_free(struct fastrpc_file *fl) struct hlist_node *n = NULL; struct fastrpc_mmap *map = NULL, 
*lmap = NULL; struct fastrpc_perf *perf = NULL, *fperf = NULL; - int cid; + int cid, err = 0; if (!fl) return 0; cid = fl->cid; - (void)fastrpc_release_current_dsp_process(fl); + err = fastrpc_release_current_dsp_process(fl); + if (err) + pr_err("adsprpc: %s: releasing DSP process failed for %s, returned 0x%x", + __func__, current->comm, err); spin_lock(&fl->apps->hlock); hlist_del_init(&fl->hn); @@ -2985,6 +3005,15 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) if (err) return err; + mutex_lock(&me->channel[cid].rpmsg_mutex); + VERIFY(err, NULL != me->channel[cid].rpdev); + if (err) { + err = -ENOTCONN; + mutex_unlock(&me->channel[cid].rpmsg_mutex); + goto bail; + } + mutex_unlock(&me->channel[cid].rpmsg_mutex); + mutex_lock(&me->channel[cid].smd_mutex); if (me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { @@ -2992,16 +3021,13 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) VERIFY(err, 0); if (err) { err = -ENOTCONN; + mutex_unlock(&me->channel[cid].smd_mutex); goto bail; } } } fl->ssrcount = me->channel[cid].ssrcount; - VERIFY(err, NULL != me->channel[cid].rpdev); - if (err) { - err = -ENOTCONN; - goto bail; - } + if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { mutex_lock(&fl->map_mutex); @@ -3012,9 +3038,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) me->channel[cid].prevssrcount = me->channel[cid].ssrcount; } + mutex_unlock(&me->channel[cid].smd_mutex); bail: - mutex_unlock(&me->channel[cid].smd_mutex); return err; } diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c index f71e57f9c435ffd5c794b432a946eefe262deb95..d8214ae2cdb27e6963a1f0ac5d53955b86694191 100644 --- a/drivers/char/diag/diag_debugfs.c +++ b/drivers/char/diag/diag_debugfs.c @@ -38,6 +38,7 @@ #include "diag_ipc_logging.h" #define DEBUG_BUF_SIZE 4096 +#define CMD_SIZE 10 static struct dentry *diag_dbgfs_dent; static int diag_dbgfs_table_index; static int 
diag_dbgfs_mempool_index; @@ -689,15 +690,14 @@ static ssize_t diag_dbgfs_read_rpmsginfo(struct file *file, char __user *ubuf, static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf, size_t count, loff_t *ppos) { - const int size = 10; - unsigned char cmd[size]; + unsigned char cmd[CMD_SIZE]; long value = 0; int len = 0; if (count < 1) return -EINVAL; - len = (count < (size - 1)) ? count : size - 1; + len = (count < (CMD_SIZE - 1)) ? count : CMD_SIZE - 1; if (copy_from_user(cmd, buf, len)) return -EFAULT; diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index e5c7aa8b9c0ef84ea26aecd78c7e152fee4a6369..b3e86d8ad8621b68fdff70ee6b4472b60e5e5c0d 100644 --- a/drivers/char/diag/diag_usb.c +++ b/drivers/char/diag/diag_usb.c @@ -290,7 +290,7 @@ static void usb_read_work_fn(struct work_struct *work) pr_debug("diag: In %s, error in reading from USB %s, err: %d\n", __func__, ch->name, err); atomic_set(&ch->read_pending, 0); - if (err != -EIO) + if (err != -EIO && err != -ESHUTDOWN) queue_work(ch->usb_wq, &(ch->read_work)); } } else { diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index fb5bee20ef8c7347f799823bdd317b3db53f77cb..767b81fbb502009a7457ac5dcf83a4b74bc28a7d 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -3983,7 +3983,7 @@ static int __init diagchar_init(void) driver->mask_check = 0; driver->in_busy_pktdata = 0; driver->in_busy_dcipktdata = 0; - driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); + driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, TYPE_CMD); hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1); hdlc_data.len = 0; non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1); diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index 4a1de090281815e350ce377cc306a6d2ae490a42..4286bf83394080bc3cbc3da3ca62fc12342dfd10 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ 
-1798,9 +1798,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt, "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n", peripheral, type, num); diagfwd_write_done(peripheral, type, num); - } else if (peripheral == APPS_DATA || - (peripheral >= 0 && peripheral < NUM_PERIPHERALS && - num == TYPE_CMD)) { + } else if ((peripheral >= 0 && + peripheral <= NUM_PERIPHERALS + NUM_UPD) && + num == TYPE_CMD) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n", peripheral, type, num); diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c index 9e305e80e1dcc76375dd4e97a4c64727304ad635..55a16b2a26e1a6d59af95270f01666012065244f 100644 --- a/drivers/char/diag/diagfwd_bridge.c +++ b/drivers/char/diag/diagfwd_bridge.c @@ -319,7 +319,9 @@ uint16_t diag_get_remote_device_mask(void) for (i = 0; i < NUM_REMOTE_DEV; i++) { if (bridge_info[i].inited && - bridge_info[i].type == DIAG_DATA_TYPE) { + bridge_info[i].type == DIAG_DATA_TYPE && + (bridge_info[i].dev_ops->remote_proc_check && + bridge_info[i].dev_ops->remote_proc_check())) { remote_dev |= 1 << i; } } diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h index ec3eb073b7126433c60e25f620e01ef52969faf9..c8043c46d5eab53bf11dcb839d69319a29ff542b 100644 --- a/drivers/char/diag/diagfwd_bridge.h +++ b/drivers/char/diag/diagfwd_bridge.h @@ -33,6 +33,7 @@ struct diag_remote_dev_ops { int (*queue_read)(int id); int (*write)(int id, unsigned char *buf, int len, int ctxt); int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt); + int (*remote_proc_check)(void); }; struct diagfwd_bridge_info { diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c index b2c8a7a214b1b01bb35683362dfab0ad12da9b5d..0a291bb3cb6a8bbedfcc1d78feb69e9a69addf0a 100644 --- a/drivers/char/diag/diagfwd_hsic.c +++ b/drivers/char/diag/diagfwd_hsic.c @@ -1,4 +1,4 @@ 
-/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -392,6 +392,7 @@ static struct diag_remote_dev_ops diag_hsic_fwd_ops = { .queue_read = hsic_queue_read, .write = hsic_write, .fwd_complete = hsic_fwd_complete, + .remote_proc_check = NULL, }; int diag_hsic_init(void) diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c index 5c0e85f6833ea8fba64205afdad70932edd8ce4a..6c83706940ef8891051e4148f90073e6ae8d352b 100644 --- a/drivers/char/diag/diagfwd_mhi.c +++ b/drivers/char/diag/diagfwd_mhi.c @@ -215,14 +215,11 @@ static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag) atomic_set(&(mhi_info->read_ch.opened), 0); atomic_set(&(mhi_info->write_ch.opened), 0); - if (close_flag == CLOSE_CHANNELS) - mhi_unprepare_from_transfer(mhi_info->mhi_dev); - if (!(atomic_read(&(mhi_info->read_ch.opened)))) - flush_workqueue(mhi_info->mhi_wq); + flush_workqueue(mhi_info->mhi_wq); - if (!(atomic_read(&(mhi_info->write_ch.opened)))) - flush_workqueue(mhi_info->mhi_wq); + if (close_flag == CLOSE_CHANNELS) + mhi_unprepare_from_transfer(mhi_info->mhi_dev); mhi_buf_tbl_clear(mhi_info); diag_remote_dev_close(mhi_info->dev_id); @@ -515,6 +512,11 @@ static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt) return 0; } +static int mhi_remote_proc_check(void) +{ + return diag_mhi[MHI_1].enabled; +} + static struct diag_mhi_info *diag_get_mhi_info(struct mhi_device *mhi_dev) { struct diag_mhi_info *mhi_info = NULL; @@ -635,6 +637,7 @@ static struct diag_remote_dev_ops diag_mhi_fwd_ops = { .queue_read = mhi_queue_read, .write = mhi_write, .fwd_complete = mhi_fwd_complete, + .remote_proc_check = mhi_remote_proc_check, }; static void diag_mhi_dev_exit(int dev) diff --git 
a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 772b05bed3fe20e6edd0c16cc62e24721ae2afd6..669b6e30ba7b626f8a394a0c0d64d9544892cb62 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -1304,9 +1304,6 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info) if (!fwd_info) return -EIO; - if (fwd_info->type == TYPE_CNTL) - flush_workqueue(driver->cntl_wq); - mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); fwd_info->ch_open = 0; if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close) diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c index 085b6062d4ce271f1922a17a93cc639fc7b23d98..0ad8b35c318df2e8706f3718a9dd44e6a03c58a8 100644 --- a/drivers/char/diag/diagfwd_rpmsg.c +++ b/drivers/char/diag/diagfwd_rpmsg.c @@ -499,7 +499,6 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work) close_work); if (!rpmsg_info || !rpmsg_info->inited || !rpmsg_info->hdl) return; - atomic_set(&rpmsg_info->opened, 0); rpmsg_info->hdl = NULL; diagfwd_channel_close(rpmsg_info->fwd_ctxt); } @@ -770,8 +769,10 @@ static void diag_rpmsg_remove(struct rpmsg_device *rpdev) return; rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name); - if (rpmsg_info) + if (rpmsg_info) { + atomic_set(&rpmsg_info->opened, 0); queue_work(rpmsg_info->wq, &rpmsg_info->close_work); + } } static struct rpmsg_device_id rpmsg_diag_table[] = { diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c index 9646133915677ab0df152969670fc91bdb7c928b..4465cc470a935aa84cf6bc22db2662c974ef9a36 100644 --- a/drivers/char/diag/diagfwd_smux.c +++ b/drivers/char/diag/diagfwd_smux.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2014, 2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -275,6 +275,7 @@ static struct diag_remote_dev_ops diag_smux_fwd_ops = { .queue_read = smux_queue_read, .write = smux_write, .fwd_complete = smux_fwd_complete, + .remote_proc_check = NULL, }; int diag_smux_init(void) diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c index f959a332ccd3ef6ddd045e12141551c50d2f3d06..b0f5d2625535381571b23c3cd8079dbeb067823f 100644 --- a/drivers/char/diag/diagfwd_socket.c +++ b/drivers/char/diag/diagfwd_socket.c @@ -243,7 +243,6 @@ static int restart_notifier_cb(struct notifier_block *this, unsigned long code, return NOTIFY_DONE; } - mutex_lock(&driver->diag_notifier_mutex); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s: ssr for processor %d ('%s')\n", __func__, notifier->processor, notifier->name); @@ -253,7 +252,12 @@ static int restart_notifier_cb(struct notifier_block *this, unsigned long code, case SUBSYS_BEFORE_SHUTDOWN: DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__); - bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN; + mutex_lock(&driver->diag_notifier_mutex); + bootup_req[notifier->processor] = PERIPHERAL_SSR_DOWN; + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: bootup_req[%s] = %d\n", + notifier->name, (int)bootup_req[notifier->processor]); + mutex_unlock(&driver->diag_notifier_mutex); break; case SUBSYS_AFTER_SHUTDOWN: @@ -269,11 +273,20 @@ static int restart_notifier_cb(struct notifier_block *this, unsigned long code, case SUBSYS_AFTER_POWERUP: DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: %s: SUBSYS_AFTER_POWERUP\n", __func__); + mutex_lock(&driver->diag_notifier_mutex); if (!bootup_req[notifier->processor]) { - bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN; + bootup_req[notifier->processor] = PERIPHERAL_SSR_DOWN; + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: bootup_req[%s] = %d\n", + notifier->name, 
(int)bootup_req[notifier->processor]); + mutex_unlock(&driver->diag_notifier_mutex); break; } - bootup_req[notifier->processor] = PEPIPHERAL_SSR_UP; + bootup_req[notifier->processor] = PERIPHERAL_SSR_UP; + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: bootup_req[%s] = %d\n", + notifier->name, (int)bootup_req[notifier->processor]); + mutex_unlock(&driver->diag_notifier_mutex); break; default: @@ -281,11 +294,6 @@ static int restart_notifier_cb(struct notifier_block *this, unsigned long code, "diag: code: %lu\n", code); break; } - mutex_unlock(&driver->diag_notifier_mutex); - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: bootup_req[%s] = %d\n", - notifier->name, (int)bootup_req[notifier->processor]); - return NOTIFY_DONE; } @@ -497,13 +505,6 @@ static void __socket_close_channel(struct diag_socket_info *info) if (!info || !info->hdl) return; - if (bootup_req[info->peripheral] == PEPIPHERAL_SSR_UP) { - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "diag: %s is up, stopping cleanup: bootup_req = %d\n", - info->name, (int)bootup_req[info->peripheral]); - return; - } - memset(&info->remote_addr, 0, sizeof(info->remote_addr)); diagfwd_channel_close(info->fwd_ctxt); @@ -621,8 +622,15 @@ static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len) info->name); mutex_lock(&driver->diag_notifier_mutex); - socket_close_channel(info); + if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: %s is up, stopping cleanup: bootup_req = %d\n", + info->name, (int)bootup_req[info->peripheral]); + mutex_unlock(&driver->diag_notifier_mutex); + break; + } mutex_unlock(&driver->diag_notifier_mutex); + socket_close_channel(info); } break; case QRTR_TYPE_DEL_CLIENT: @@ -635,8 +643,15 @@ static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len) info->name); mutex_lock(&driver->diag_notifier_mutex); - socket_close_channel(info); + if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + 
"diag: %s is up, stopping cleanup: bootup_req = %d\n", + info->name, (int)bootup_req[info->peripheral]); + mutex_unlock(&driver->diag_notifier_mutex); + break; + } mutex_unlock(&driver->diag_notifier_mutex); + socket_close_channel(info); } break; } @@ -741,24 +756,34 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len) qrtr_ctrl_recd += read_len; continue; } - - if (!atomic_read(&info->opened) && - info->port_type == PORT_TYPE_SERVER) { - /* - * This is the first packet from the client. Copy its - * address to the connection object. Consider this - * channel open for communication. - */ + if (info->type == TYPE_CNTL) { memcpy(&info->remote_addr, &src_addr, sizeof(src_addr)); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "%s first client [0x%x:0x%x]\n", - info->name, src_addr.sq_node, - src_addr.sq_port); + "%s client node:port::[0x%x]:[0x%x]\n", + info->name, src_addr.sq_node, src_addr.sq_port); - if (info->ins_id == INST_ID_DCI) - atomic_set(&info->opened, 1); - else + if (!atomic_read(&info->opened)) __socket_open_channel(info); + } else { + if (!atomic_read(&info->opened) && + info->port_type == PORT_TYPE_SERVER) { + /* + * This is the first packet from the client. + * Copy its address to the connection object. + * Consider this channel open for communication. + */ + memcpy(&info->remote_addr, &src_addr, + sizeof(src_addr)); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "%s client node:port::[0x%x]:[0x%x]\n", + info->name, src_addr.sq_node, + src_addr.sq_port); + + if (info->ins_id == INST_ID_DCI) + atomic_set(&info->opened, 1); + else + __socket_open_channel(info); + } } temp += read_len; total_recd += read_len; diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h index 810bd4bb68b87f5e12a5c5ed195942df8f2a6e76..1a23fd76a7e0ca6aeec48a4e686e2c9cba63c145 100644 --- a/drivers/char/diag/diagfwd_socket.h +++ b/drivers/char/diag/diagfwd_socket.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,9 +21,9 @@ #define PORT_TYPE_SERVER 0 #define PORT_TYPE_CLIENT 1 -#define PEPIPHERAL_AFTER_BOOT 0 -#define PEPIPHERAL_SSR_DOWN 1 -#define PEPIPHERAL_SSR_UP 2 +#define PERIPHERAL_AFTER_BOOT 0 +#define PERIPHERAL_SSR_DOWN 1 +#define PERIPHERAL_SSR_UP 2 enum { SOCKET_MODEM, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 508a4db60dd0e5067d4400a32dde2de050c937b2..5708007fc729ac071b0b4dc00311a090d9bb1b58 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -77,6 +77,7 @@ struct clk_core { unsigned long max_rate; unsigned long accuracy; int phase; + struct clk_duty duty; struct hlist_head children; struct hlist_node child_node; struct hlist_head clks; @@ -2304,6 +2305,163 @@ int clk_get_phase(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_get_phase); +static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) +{ + /* Assume a default value of 50% */ + core->duty.num = 1; + core->duty.den = 2; +} + +static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); + +static int clk_core_update_duty_cycle_nolock(struct clk_core *core) +{ + struct clk_duty *duty = &core->duty; + int ret = 0; + + if (!core->ops->get_duty_cycle) + return clk_core_update_duty_cycle_parent_nolock(core); + + ret = core->ops->get_duty_cycle(core->hw, duty); + if (ret) + goto reset; + + /* Don't trust the clock provider too much */ + if (duty->den == 0 || duty->num > duty->den) { + ret = -EINVAL; + goto reset; + } + + return 0; + +reset: + clk_core_reset_duty_cycle_nolock(core); + return ret; +} + +static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) +{ + int ret = 0; + + if (core->parent && + core->flags & CLK_DUTY_CYCLE_PARENT) { + ret = clk_core_update_duty_cycle_nolock(core->parent); + memcpy(&core->duty, &core->parent->duty, 
sizeof(core->duty)); + } else { + clk_core_reset_duty_cycle_nolock(core); + } + + return ret; +} + +static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, + struct clk_duty *duty); + +static int clk_core_set_duty_cycle_nolock(struct clk_core *core, + struct clk_duty *duty) +{ + int ret; + + lockdep_assert_held(&prepare_lock); + + trace_clk_set_duty_cycle(core, duty); + + if (!core->ops->set_duty_cycle) + return clk_core_set_duty_cycle_parent_nolock(core, duty); + + ret = core->ops->set_duty_cycle(core->hw, duty); + if (!ret) + memcpy(&core->duty, duty, sizeof(*duty)); + + trace_clk_set_duty_cycle_complete(core, duty); + + return ret; +} + +static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, + struct clk_duty *duty) +{ + int ret = 0; + + if (core->parent && + core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { + ret = clk_core_set_duty_cycle_nolock(core->parent, duty); + memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); + } + + return ret; +} + +/** + * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal + * @clk: clock signal source + * @num: numerator of the duty cycle ratio to be applied + * @den: denominator of the duty cycle ratio to be applied + * + * Apply the duty cycle ratio if the ratio is valid and the clock can + * perform this operation + * + * Returns (0) on success, a negative errno otherwise. 
+ */ +int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) +{ + int ret; + struct clk_duty duty; + + if (!clk) + return 0; + + /* sanity check the ratio */ + if (den == 0 || num > den) + return -EINVAL; + + duty.num = num; + duty.den = den; + + clk_prepare_lock(); + + ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); + + clk_prepare_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_duty_cycle); + +static int clk_core_get_scaled_duty_cycle(struct clk_core *core, + unsigned int scale) +{ + struct clk_duty *duty = &core->duty; + int ret; + + clk_prepare_lock(); + + ret = clk_core_update_duty_cycle_nolock(core); + if (!ret) + ret = mult_frac(scale, duty->num, duty->den); + + clk_prepare_unlock(); + + return ret; +} + +/** + * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal + * @clk: clock signal source + * @scale: scaling factor to be applied to represent the ratio as an integer + * + * Returns the duty cycle ratio of a clock node multiplied by the provided + * scaling factor, or negative errno on error. 
+ */ +int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) +{ + if (!clk) + return 0; + + return clk_core_get_scaled_duty_cycle(clk->core, scale); +} +EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); + /** * clk_is_match - check if two clk's point to the same hardware clock * @p: clk compared against q @@ -2420,11 +2578,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, if (!c) return; - seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", + seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", level * 3 + 1, "", 30 - level * 3, c->name, c->enable_count, c->prepare_count, clk_core_get_rate(c), - clk_core_get_accuracy(c), clk_core_get_phase(c)); + clk_core_get_accuracy(c), clk_core_get_phase(c), + clk_core_get_scaled_duty_cycle(c, 100000)); } static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, @@ -2435,10 +2594,14 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, if (!c) return; + if (c->ops->bus_vote) + c->ops->bus_vote(c->hw, true); clk_summary_show_one(s, c, level); hlist_for_each_entry(child, &c->children, child_node) clk_summary_show_subtree(s, child, level + 1); + if (c->ops->bus_vote) + c->ops->bus_vote(c->hw, false); } static int clk_summary_show(struct seq_file *s, void *data) @@ -2446,8 +2609,9 @@ static int clk_summary_show(struct seq_file *s, void *data) struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; - seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); - seq_puts(s, "----------------------------------------------------------------------------------------\n"); + seq_puts(s, " enable prepare duty\n"); + seq_puts(s, " clock count count rate accuracy phase cycle\n"); + seq_puts(s, "-------------------------------------------------------------------------------\n"); clk_prepare_lock(); @@ -2485,6 +2649,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"rate\": 
%lu,", clk_core_get_rate(c)); seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); + seq_printf(s, "\"duty_cycle\": %u", + clk_core_get_scaled_duty_cycle(c, 100000)); } static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) @@ -2494,6 +2660,9 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) if (!c) return; + if (c->ops->bus_vote) + c->ops->bus_vote(c->hw, true); + clk_dump_one(s, c, level); hlist_for_each_entry(child, &c->children, child_node) { @@ -2502,6 +2671,9 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) } seq_putc(s, '}'); + + if (c->ops->bus_vote) + c->ops->bus_vote(c->hw, false); } static int clk_dump(struct seq_file *s, void *data) @@ -2717,9 +2889,10 @@ static void clock_debug_print_enabled_clocks(struct seq_file *s) struct clk_core *core; int cnt = 0; - clock_debug_output(s, 0, "Enabled clocks:\n"); + if (!mutex_trylock(&clk_debug_lock)) + return; - mutex_lock(&clk_debug_lock); + clock_debug_output(s, 0, "Enabled clocks:\n"); hlist_for_each_entry(core, &clk_debug_list, debug_node) cnt += clock_debug_print_clock(core, s); @@ -2770,9 +2943,20 @@ EXPORT_SYMBOL(clk_debug_print_hw); static int print_hw_show(struct seq_file *m, void *unused) { struct clk_core *c = m->private; + struct clk_core *clk; + + clk_prepare_lock(); + for (clk = c; clk; clk = clk->parent) + if (clk->ops->bus_vote) + clk->ops->bus_vote(clk->hw, true); clk_debug_print_hw(c, m); + for (clk = c; clk; clk = clk->parent) + if (clk->ops->bus_vote) + clk->ops->bus_vote(c->hw, false); + clk_prepare_unlock(); + return 0; } @@ -2899,6 +3083,28 @@ static const struct file_operations rate_max_fops = { .release = seq_release, }; +static int clk_duty_cycle_show(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + struct clk_duty *duty = &core->duty; + + seq_printf(s, "%u/%u\n", duty->num, duty->den); + + return 0; +} + 
+static int clk_duty_cycle_open(struct inode *inode, struct file *file) +{ + return single_open(file, clk_duty_cycle_show, inode->i_private); +} + +static const struct file_operations clk_duty_cycle_fops = { + .open = clk_duty_cycle_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) { struct dentry *d; @@ -2977,6 +3183,11 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) if (!d) goto err_out; + d = debugfs_create_file("clk_duty_cycle", 0444, core->dentry, + core, &clk_duty_cycle_fops); + if (!d) + goto err_out; + if (core->ops->debug_init) { ret = core->ops->debug_init(core->hw, core->dentry); if (ret) @@ -3050,14 +3261,19 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file); /* * Print the names of all enabled clocks and their parents if - * debug_suspend is set from debugfs. + * debug_suspend is set from debugfs along with print_parent flag set to 1. + * Otherwise if print_parent set to 0, print only enabled clocks + * */ -void clock_debug_print_enabled(void) +void clock_debug_print_enabled(bool print_parent) { if (likely(!debug_suspend)) return; - clock_debug_print_enabled_debug_suspend(NULL); + if (print_parent) + clock_debug_print_enabled_clocks(NULL); + else + clock_debug_print_enabled_debug_suspend(NULL); } EXPORT_SYMBOL_GPL(clock_debug_print_enabled); @@ -3250,6 +3466,11 @@ static int __clk_core_init(struct clk_core *core) else core->phase = 0; + /* + * Set clk's duty cycle. + */ + clk_core_update_duty_cycle_nolock(core); + /* * Set clk's rate. The preferred method is to use .recalc_rate. 
For * simple clocks and lazy developers the default fallback is to use the diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h index 10fec8ad7dc9526454d2030ab66dd5fa8fe91b00..3fcdc7fc1d0cf20758eebb4875a6a6596541686c 100644 --- a/drivers/clk/clk.h +++ b/drivers/clk/clk.h @@ -23,7 +23,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, void __clk_free_clk(struct clk *clk); /* Debugfs API to print the enabled clocks */ -void clock_debug_print_enabled(void); +void clock_debug_print_enabled(bool print_parent); void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f); #define WARN_CLK(core, name, cond, fmt, ...) do { \ diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 74c68391051f3d63fe930d4dfc7a4a044e5ee097..ed70cbe590fdfd0ef4a96bbeeacb1ea37f62f6ae 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -466,3 +466,13 @@ config MSM_DEBUGCC_SDMMAGPIE Support for the debug clock controller on Qualcomm Technologies, Inc SDMMAGPIE devices. Say Y if you want to support the clock measurement functionality. + +config GCC_SDXPRAIRIE + tristate "SDXPRAIRIE Global Clock Controller" + select QCOM_GDSC + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on Qualcomm Technologies, Inc + SDXPRAIRIE devices. + Say Y if you want to use peripheral devices such as UART, SPI, I2C, + USB, SD/eMMC, PCIe, etc. 
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index fba12da98c02d071ddde65ac58f89db3cf578f4a..59cfe7d30244c8dee53464a903901093a94796eb 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o obj-$(CONFIG_CLOCK_CPU_QCS405) += clk-cpu-qcs405.o +obj-$(CONFIG_GCC_SDXPRAIRIE) += gcc-sdxprairie.o obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c index 0e10133e04bcb2aec1eed3dee5e4e95e63dad9f0..1f5bbfd8f1ddd0ca7462d9da5f0be03953df194d 100644 --- a/drivers/clk/qcom/camcc-sm8150.c +++ b/drivers/clk/qcom/camcc-sm8150.c @@ -24,8 +24,10 @@ #include #include #include +#include #include +#include #include "common.h" #include "clk-regmap.h" @@ -38,9 +40,41 @@ #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } +#define MSM_BUS_VECTOR(_src, _dst, _ab, _ib) \ +{ \ + .src = _src, \ + .dst = _dst, \ + .ab = _ab, \ + .ib = _ib, \ +} + static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner); +static struct msm_bus_vectors clk_debugfs_vectors[] = { + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_CAMERA_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_CAMERA_CFG, 0, 1), +}; + +static struct msm_bus_paths clk_debugfs_usecases[] = { + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[0], + }, + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[1], + } +}; + +static struct msm_bus_scale_pdata clk_debugfs_scale_table = { + .usecase = clk_debugfs_usecases, + .num_usecases = ARRAY_SIZE(clk_debugfs_usecases), + .name = "clk_camcc_debugfs", +}; + enum { P_BI_TCXO, P_BI_TCXO_MX, @@ -2434,6 +2468,8 @@ static int 
cam_cc_sm8150_probe(struct platform_device *pdev) struct regmap *regmap; struct clk *clk; int ret = 0; + int i; + unsigned int camcc_bus_id; regmap = qcom_cc_map(pdev, &cam_cc_sm8150_desc); if (IS_ERR(regmap)) { @@ -2464,6 +2500,19 @@ static int cam_cc_sm8150_probe(struct platform_device *pdev) "Unable to get vdd_mm regulator\n"); return PTR_ERR(vdd_mm.regulator[0]); } + vdd_mm.use_max_uV = true; + + camcc_bus_id = msm_bus_scale_register_client(&clk_debugfs_scale_table); + if (!camcc_bus_id) { + dev_err(&pdev->dev, "Unable to register for bw voting\n"); + return -EPROBE_DEFER; + } + + for (i = 0; i < ARRAY_SIZE(cam_cc_sm8150_clocks); i++) + if (cam_cc_sm8150_clocks[i]) + *(unsigned int *)(void *) + &cam_cc_sm8150_clocks[i]->hw.init->bus_cl_id = + camcc_bus_id; ret = cam_cc_sm8150_fixup(pdev, regmap); if (ret) diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index c0add060eda32565fca078ce102488952117666c..837c62060cae7bd03bf92567e61d7fc48054eaac 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -20,6 +20,7 @@ #include "clk-alpha-pll.h" #include "common.h" +#include "clk-debug.h" #define PLL_MODE 0x00 #define PLL_OUTCTRL BIT(0) @@ -136,6 +137,33 @@ #define AGERA_PLL_TEST_CTL_U 0x1c #define AGERA_PLL_POST_DIV_MASK 0x3 +/* LUCID PLL speficic settings and offsets */ +#define LUCID_PLL_OFF_L_VAL 0x04 +#define LUCID_PLL_OFF_CAL_L_VAL 0x08 +#define LUCID_PLL_OFF_USER_CTL 0x0c +#define LUCID_PLL_OFF_USER_CTL_U 0x10 +#define LUCID_PLL_OFF_USER_CTL_U1 0x14 +#define LUCID_PLL_OFF_CONFIG_CTL 0x18 +#define LUCID_PLL_OFF_CONFIG_CTL_U 0x1c +#define LUCID_PLL_OFF_CONFIG_CTL_U1 0x20 +#define LUCID_PLL_OFF_TEST_CTL 0x24 +#define LUCID_PLL_OFF_TEST_CTL_U 0x28 +#define LUCID_PLL_OFF_TEST_CTL_U1 0x2c +#define LUCID_PLL_OFF_STATUS 0x30 +#define LUCID_PLL_OFF_OPMODE 0x38 +#define LUCID_PLL_OFF_ALPHA_VAL 0x40 +#define LUCID_PLL_OFF_FRAC 0x40 + +#define LUCID_PLL_CAL_VAL 0x44 +#define LUCID_PLL_STANDBY 0x0 +#define 
LUCID_PLL_RUN 0x1 +#define LUCID_PLL_OUT_MASK 0x7 +#define LUCID_PCAL_DONE BIT(26) +#define LUCID_PLL_RATE_MARGIN 500 +#define LUCID_PLL_ACK_LATCH BIT(29) +#define LUCID_PLL_UPDATE BIT(22) +#define LUCID_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23) + #define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \ struct clk_alpha_pll, clkr) @@ -452,7 +480,8 @@ static unsigned long alpha_pll_calc_rate(const struct clk_alpha_pll *pll, int alpha_bw = ALPHA_BITWIDTH; if (pll->type == TRION_PLL || pll->type == REGERA_PLL - || pll->type == FABIA_PLL || pll->type == AGERA_PLL) + || pll->type == FABIA_PLL || pll->type == AGERA_PLL + || pll->type == LUCID_PLL) alpha_bw = ALPHA_REG_16BITWIDTH; return (prate * l) + ((prate * a) >> alpha_bw); @@ -489,7 +518,8 @@ alpha_pll_round_rate(const struct clk_alpha_pll *pll, unsigned long rate, * the fractional divider. */ if (pll->type == TRION_PLL || pll->type == REGERA_PLL - || pll->type == FABIA_PLL || pll->type == AGERA_PLL) + || pll->type == FABIA_PLL || pll->type == AGERA_PLL + || pll->type == LUCID_PLL) alpha_bw = ALPHA_REG_16BITWIDTH; /* Upper ALPHA_BITWIDTH bits of Alpha */ @@ -1297,6 +1327,7 @@ const struct clk_ops clk_pll_sleep_vote_ops = { .enable = clk_enable_regmap, .disable = clk_disable_regmap, .list_registers = clk_alpha_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_pll_sleep_vote_ops); @@ -1308,6 +1339,7 @@ const struct clk_ops clk_alpha_pll_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_alpha_pll_set_rate, .list_registers = clk_alpha_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_ops); @@ -1319,6 +1351,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_alpha_pll_set_rate, .list_registers = clk_alpha_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops); @@ -1331,6 +1364,7 @@ const struct clk_ops clk_trion_pll_ops = { .round_rate = 
clk_alpha_pll_round_rate, .set_rate = clk_trion_pll_set_rate, .list_registers = clk_trion_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_trion_pll_ops); @@ -1341,6 +1375,7 @@ const struct clk_ops clk_trion_fixed_pll_ops = { .recalc_rate = clk_trion_pll_recalc_rate, .round_rate = clk_alpha_pll_round_rate, .list_registers = clk_trion_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops); @@ -1352,6 +1387,7 @@ const struct clk_ops clk_regera_pll_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_regera_pll_set_rate, .list_registers = clk_regera_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_regera_pll_ops); @@ -1417,6 +1453,7 @@ const struct clk_ops clk_alpha_pll_postdiv_ops = { .recalc_rate = clk_alpha_pll_postdiv_recalc_rate, .round_rate = clk_alpha_pll_postdiv_round_rate, .set_rate = clk_alpha_pll_postdiv_set_rate, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops); @@ -1498,6 +1535,7 @@ const struct clk_ops clk_trion_pll_postdiv_ops = { .recalc_rate = clk_trion_pll_postdiv_recalc_rate, .round_rate = clk_trion_pll_postdiv_round_rate, .set_rate = clk_trion_pll_postdiv_set_rate, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_trion_pll_postdiv_ops); @@ -1704,6 +1742,7 @@ const struct clk_ops clk_alpha_pll_slew_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_alpha_pll_slew_set_rate, .list_registers = clk_alpha_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_alpha_pll_slew_ops); @@ -2047,6 +2086,7 @@ const struct clk_ops clk_fabia_pll_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_fabia_pll_set_rate, .list_registers = clk_fabia_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_fabia_pll_ops); @@ -2056,6 +2096,7 @@ const struct clk_ops clk_fabia_fixed_pll_ops = { .recalc_rate = clk_fabia_pll_recalc_rate, .round_rate = 
clk_alpha_pll_round_rate, .list_registers = clk_fabia_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_fabia_fixed_pll_ops); @@ -2137,6 +2178,7 @@ const struct clk_ops clk_generic_pll_postdiv_ops = { .recalc_rate = clk_generic_pll_postdiv_recalc_rate, .round_rate = clk_generic_pll_postdiv_round_rate, .set_rate = clk_generic_pll_postdiv_set_rate, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_generic_pll_postdiv_ops); @@ -2274,5 +2316,330 @@ const struct clk_ops clk_agera_pll_ops = { .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_agera_pll_set_rate, .list_registers = clk_agera_pll_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL(clk_agera_pll_ops); + +static int lucid_pll_is_enabled(struct clk_alpha_pll *pll, + struct regmap *regmap) +{ + u32 mode_regval, opmode_regval; + int ret; + + ret = regmap_read(regmap, pll->offset + PLL_MODE, &mode_regval); + ret |= regmap_read(regmap, pll->offset + LUCID_PLL_OFF_OPMODE, + &opmode_regval); + if (ret) + return 0; + + return ((opmode_regval & LUCID_PLL_RUN) && + (mode_regval & PLL_OUTCTRL)); +} + +int clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + int ret; + + if (lucid_pll_is_enabled(pll, regmap)) { + pr_warn("PLL is already enabled. Skipping configuration.\n"); + return 0; + } + + /* + * Disable the PLL if it's already been initialized. Not doing so might + * lead to the PLL running with the old frequency configuration. 
+ */ + if (pll->inited) { + ret = regmap_update_bits(regmap, pll->offset + PLL_MODE, + PLL_RESET_N, 0); + if (ret) + return ret; + } + + if (config->l) + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_L_VAL, + config->l); + + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_CAL_L_VAL, + LUCID_PLL_CAL_VAL); + + if (config->alpha) + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_ALPHA_VAL, + config->alpha); + if (config->config_ctl_val) + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_CONFIG_CTL, + config->config_ctl_val); + + if (config->config_ctl_hi_val) + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_CONFIG_CTL_U, + config->config_ctl_hi_val); + + if (config->config_ctl_hi1_val) + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_USER_CTL_U1, + config->config_ctl_hi1_val); + + if (config->post_div_mask) + regmap_update_bits(regmap, pll->offset + LUCID_PLL_OFF_USER_CTL, + config->post_div_mask, config->post_div_val); + + regmap_update_bits(regmap, pll->offset + PLL_MODE, + LUCID_PLL_HW_UPDATE_LOGIC_BYPASS, + LUCID_PLL_HW_UPDATE_LOGIC_BYPASS); + + /* Disable PLL output */ + ret = regmap_update_bits(regmap, pll->offset + PLL_MODE, + PLL_OUTCTRL, 0); + if (ret) + return ret; + + /* Set operation mode to OFF */ + regmap_write(regmap, pll->offset + LUCID_PLL_OFF_OPMODE, + LUCID_PLL_STANDBY); + + /* PLL should be in OFF mode before continuing */ + wmb(); + + /* Place the PLL in STANDBY mode */ + ret = regmap_update_bits(regmap, pll->offset + PLL_MODE, + PLL_RESET_N, PLL_RESET_N); + if (ret) + return ret; + + pll->inited = true; + return 0; +} + +static int alpha_pll_lucid_enable(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 val; + int ret; + + ret = regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &val); + if (ret) + return ret; + + /* If in FSM mode, just vote for it */ + if (val & PLL_VOTE_FSM_ENA) { + ret = clk_enable_regmap(hw); + if (ret) + return ret; + return wait_for_pll_enable_active(pll); + } + + if 
(unlikely(!pll->inited)) { + ret = clk_lucid_pll_configure(pll, pll->clkr.regmap, + pll->config); + if (ret) { + pr_err("Failed to configure %s\n", clk_hw_get_name(hw)); + return ret; + } + } + + /* Set operation mode to RUN */ + regmap_write(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_OPMODE, + LUCID_PLL_RUN); + + ret = wait_for_pll_enable_lock(pll); + if (ret) + return ret; + + /* Enable the PLL outputs */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + + LUCID_PLL_OFF_USER_CTL, + LUCID_PLL_OUT_MASK, LUCID_PLL_OUT_MASK); + if (ret) + return ret; + + /* Enable the global PLL outputs */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE, + PLL_OUTCTRL, PLL_OUTCTRL); + if (ret) + return ret; + + /* Ensure that the write above goes through before returning. */ + mb(); + return ret; +} + +static void alpha_pll_lucid_disable(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 val; + int ret; + + ret = regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &val); + if (ret) + return; + + /* If in FSM mode, just unvote it */ + if (val & PLL_VOTE_FSM_ENA) { + clk_disable_regmap(hw); + return; + } + + /* Disable the global PLL output */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE, + PLL_OUTCTRL, 0); + if (ret) + return; + + /* Disable the PLL outputs */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + + LUCID_PLL_OFF_USER_CTL, + LUCID_PLL_OUT_MASK, 0); + if (ret) + return; + + /* Place the PLL mode in STANDBY */ + regmap_write(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_OPMODE, + LUCID_PLL_STANDBY); + + regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE, + PLL_RESET_N, PLL_RESET_N); +} + +/* + * The Lucid PLL requires a power-on self-calibration which happens when the + * PLL comes out of reset. 
The calibration is performed at an output frequency + * of ~1300 MHz which means that SW will have to vote on a voltage that's + * equal to or greater than SVS_L1 on the corresponding rail. Since this is not + * feasible to do in the atomic enable path, temporarily bring up the PLL here, + * let it calibrate, and place it in standby before returning. + */ +static int alpha_pll_lucid_prepare(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + struct clk_hw *p; + u32 regval; + unsigned long prate; + int ret; + + /* Return early if calibration is not needed. */ + ret = regmap_read(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_STATUS, + &regval); + if (regval & LUCID_PCAL_DONE) + return ret; + + p = clk_hw_get_parent(hw); + if (!p) + return -EINVAL; + + prate = clk_hw_get_rate(p); + ret = clk_vote_rate_vdd(hw->core, LUCID_PLL_CAL_VAL * prate); + if (ret) + return ret; + + ret = alpha_pll_lucid_enable(hw); + if (ret) + goto ret_path; + + alpha_pll_lucid_disable(hw); +ret_path: + clk_unvote_rate_vdd(hw->core, LUCID_PLL_CAL_VAL * prate); + return 0; +} + +static unsigned long +alpha_pll_lucid_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l, frac; + + regmap_read(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_L_VAL, &l); + regmap_read(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_ALPHA_VAL, + &frac); + + return alpha_pll_calc_rate(pll, parent_rate, l, frac); +} + +static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + unsigned long rrate; + u32 regval, l; + u64 a; + int ret; + + rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a); + + /* + * Due to a limited number of bits for fractional rate programming, the + * rounded up rate could be marginally higher than the requested rate. 
+ */ + if (rrate > (rate + LUCID_PLL_RATE_MARGIN) || rrate < rate) { + pr_err("Call set rate on the PLL with rounded rates!\n"); + return -EINVAL; + } + + regmap_write(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_L_VAL, l); + regmap_write(pll->clkr.regmap, pll->offset + LUCID_PLL_OFF_ALPHA_VAL, + a); + + /* Latch the PLL input */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE, + LUCID_PLL_UPDATE, LUCID_PLL_UPDATE); + if (ret) + return ret; + + /* Wait for 2 reference cycles before checking the ACK bit. */ + udelay(1); + regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &regval); + if (!(regval & LUCID_PLL_ACK_LATCH)) { + WARN(1, "PLL latch failed. Output may be unstable!\n"); + return -EINVAL; + } + + /* Return the latch input to 0 */ + ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE, + LUCID_PLL_UPDATE, 0); + if (ret) + return ret; + + if (clk_hw_is_enabled(hw)) { + ret = wait_for_pll_enable_lock(pll); + if (ret) + return ret; + } + + /* Wait for PLL output to stabilize */ + udelay(100); + return 0; +} + +static int alpha_pll_lucid_is_enabled(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + + return lucid_pll_is_enabled(pll, pll->clkr.regmap); +} + +const struct clk_ops clk_alpha_pll_lucid_ops = { + .prepare = alpha_pll_lucid_prepare, + .enable = alpha_pll_lucid_enable, + .disable = alpha_pll_lucid_disable, + .is_enabled = alpha_pll_lucid_is_enabled, + .recalc_rate = alpha_pll_lucid_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, + .set_rate = alpha_pll_lucid_set_rate, + .list_registers = clk_alpha_pll_list_registers, +}; +EXPORT_SYMBOL(clk_alpha_pll_lucid_ops); + +const struct clk_ops clk_alpha_pll_fixed_lucid_ops = { + .enable = alpha_pll_lucid_enable, + .disable = alpha_pll_lucid_disable, + .is_enabled = alpha_pll_lucid_is_enabled, + .recalc_rate = alpha_pll_lucid_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, +}; +EXPORT_SYMBOL(clk_alpha_pll_fixed_lucid_ops); + +const struct 
clk_ops clk_alpha_pll_postdiv_lucid_ops = { + .recalc_rate = clk_alpha_pll_postdiv_recalc_rate, + .round_rate = clk_alpha_pll_postdiv_round_rate, + .set_rate = clk_alpha_pll_postdiv_set_rate, +}; +EXPORT_SYMBOL(clk_alpha_pll_postdiv_lucid_ops); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 5157742c173682750e4c25ae10cdb0e3e13c2143..772120d291988a7243312522e05c8324bd07799d 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -34,6 +34,7 @@ enum pll_type { REGERA_PLL, FABIA_PLL, AGERA_PLL, + LUCID_PLL, }; /** @@ -143,6 +144,9 @@ extern const struct clk_ops clk_fabia_pll_ops; extern const struct clk_ops clk_fabia_fixed_pll_ops; extern const struct clk_ops clk_generic_pll_postdiv_ops; extern const struct clk_ops clk_agera_pll_ops; +extern const struct clk_ops clk_alpha_pll_lucid_ops; +extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops; +extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops; void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); @@ -154,5 +158,6 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); - +int clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); #endif diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index ad5836569be37fd7c9f1d00035e794d87691d3b6..8963c4a691ef606efcd4929d322e3dd19958c7e1 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2017-2018 The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -189,6 +189,7 @@ const struct clk_ops clk_branch_ops = { .disable = clk_branch_disable, .is_enabled = clk_is_enabled_regmap, .set_flags = clk_branch_set_flags, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_branch_ops); @@ -378,6 +379,7 @@ const struct clk_ops clk_branch2_ops = { .set_flags = clk_branch_set_flags, .list_registers = clk_branch2_list_registers, .debug_init = clk_debug_measure_add, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_branch2_ops); @@ -443,6 +445,7 @@ const struct clk_ops clk_branch2_hw_ctl_ops = { .recalc_rate = clk_branch2_hw_ctl_recalc_rate, .determine_rate = clk_branch2_hw_ctl_determine_rate, .set_flags = clk_branch_set_flags, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_branch2_hw_ctl_ops); @@ -500,6 +503,7 @@ const struct clk_ops clk_gate2_ops = { .is_enabled = clk_is_enabled_regmap, .list_registers = clk_gate2_list_registers, .debug_init = clk_debug_measure_add, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_gate2_ops); @@ -507,5 +511,6 @@ const struct clk_ops clk_branch_simple_ops = { .enable = clk_enable_regmap, .disable = clk_disable_regmap, .is_enabled = clk_is_enabled_regmap, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_branch_simple_ops); diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 475ef6b0c12ea03174c6febf712ed81bb784d3d3..874a6f07b96e95c519278d1d43895301025534d6 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -305,6 +305,7 @@ static struct clk_osm l3_clk = { static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0); static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0); +static DEFINE_CLK_VOTER(l3_cluster2_vote_clk, l3_clk, 0); static DEFINE_CLK_VOTER(l3_misc_vote_clk, l3_clk, 0); static DEFINE_CLK_VOTER(l3_gpu_vote_clk, l3_clk, 0); @@ -458,6 
+459,7 @@ static struct clk_osm cpu7_perfpcl_clk = { static struct clk_hw *osm_qcom_clk_hws[] = { [L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw, [L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw, + [L3_CLUSTER2_VOTE_CLK] = &l3_cluster2_vote_clk.hw, [L3_MISC_VOTE_CLK] = &l3_misc_vote_clk.hw, [L3_GPU_VOTE_CLK] = &l3_gpu_vote_clk.hw, [L3_CLK] = &l3_clk.hw, @@ -1037,6 +1039,7 @@ static void clk_cpu_osm_driver_sm6150_fixup(void) osm_qcom_clk_hws[CPU5_PERFCL_CLK] = NULL; osm_qcom_clk_hws[CPU7_PERFPCL_CLK] = NULL; osm_qcom_clk_hws[PERFPCL_CLK] = NULL; + osm_qcom_clk_hws[L3_CLUSTER2_VOTE_CLK] = NULL; osm_qcom_clk_hws[CPU4_PWRCL_CLK] = &cpu4_pwrcl_clk.hw; osm_qcom_clk_hws[CPU5_PWRCL_CLK] = &cpu5_pwrcl_clk.hw; osm_qcom_clk_hws[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw; @@ -1053,6 +1056,7 @@ static void clk_cpu_osm_driver_sdmshrike_fixup(void) { osm_qcom_clk_hws[CPU7_PERFPCL_CLK] = NULL; osm_qcom_clk_hws[PERFPCL_CLK] = NULL; + osm_qcom_clk_hws[L3_CLUSTER2_VOTE_CLK] = NULL; osm_qcom_clk_hws[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw; clk_cpu_map[7] = &cpu7_perfcl_clk; @@ -1173,6 +1177,8 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) "clk: Failed to enable cluster0 clock for L3\n"); WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk), "clk: Failed to enable cluster1 clock for L3\n"); + WARN(clk_prepare_enable(l3_cluster2_vote_clk.hw.clk), + "clk: Failed to enable cluster2 clock for L3\n"); WARN(clk_prepare_enable(l3_misc_vote_clk.hw.clk), "clk: Failed to enable misc clock for L3\n"); WARN(clk_prepare_enable(l3_gpu_vote_clk.hw.clk), diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c index 2250099af90526c073ae9fbc4562e5f2a79f7b59..4cc851ed438a057c2871c85d7c704d35b0348a0b 100644 --- a/drivers/clk/qcom/clk-debug.c +++ b/drivers/clk/qcom/clk-debug.c @@ -392,3 +392,9 @@ int clk_debug_measure_register(struct clk_hw *hw) } EXPORT_SYMBOL(clk_debug_measure_register); +void clk_debug_bus_vote(struct clk_hw *hw, bool enable) +{ + if 
(hw->init->bus_cl_id) + msm_bus_scale_client_update_request(hw->init->bus_cl_id, + enable); +} diff --git a/drivers/clk/qcom/clk-debug.h b/drivers/clk/qcom/clk-debug.h index d423d23997c227bbd784199a5b89622395c3c4d5..2f7b26124649cf6ed5ed56ff67042509d095496a 100644 --- a/drivers/clk/qcom/clk-debug.h +++ b/drivers/clk/qcom/clk-debug.h @@ -140,5 +140,6 @@ extern const struct clk_ops clk_debug_mux_ops; int clk_debug_measure_register(struct clk_hw *hw); int clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry); +void clk_debug_bus_vote(struct clk_hw *hw, bool enable); #endif diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index d4ac18485d6de036e430c122bdd27066cf5d82a9..175422170c1742fbba99f15b42ead19ad6c4df60 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -28,6 +28,7 @@ #include "clk-rcg.h" #include "common.h" +#include "clk-debug.h" #define CMD_REG 0x0 #define CMD_UPDATE BIT(0) @@ -510,7 +511,8 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, } if (rcg->flags & FORCE_ENABLE_RCG) { - rcg->current_freq = clk_get_rate(hw->clk); + rcg->current_freq = DIV_ROUND_CLOSEST_ULL( + clk_get_rate(hw->clk), 1000) * 1000; if (rcg->current_freq == cxo_f.freq) curr_src_index = 0; else { @@ -682,6 +684,7 @@ const struct clk_ops clk_rcg2_ops = { .set_rate_and_parent = clk_rcg2_set_rate_and_parent, .list_rate = clk_rcg2_list_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_rcg2_ops); @@ -695,6 +698,7 @@ const struct clk_ops clk_rcg2_floor_ops = { .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent, .list_rate = clk_rcg2_list_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops); @@ -775,6 +779,7 @@ const struct clk_ops clk_rcg2_shared_ops = { .determine_rate = clk_rcg2_determine_rate, .set_rate = clk_rcg2_shared_set_rate, .list_registers = clk_rcg2_list_registers, + 
.bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); @@ -903,6 +908,7 @@ const struct clk_ops clk_edp_pixel_ops = { .set_rate_and_parent = clk_edp_pixel_set_rate_and_parent, .determine_rate = clk_edp_pixel_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); @@ -962,6 +968,7 @@ const struct clk_ops clk_byte_ops = { .set_rate_and_parent = clk_byte_set_rate_and_parent, .determine_rate = clk_byte_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_byte_ops); @@ -1033,6 +1040,7 @@ const struct clk_ops clk_byte2_ops = { .set_rate_and_parent = clk_byte2_set_rate_and_parent, .determine_rate = clk_byte2_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_byte2_ops); @@ -1125,6 +1133,7 @@ const struct clk_ops clk_pixel_ops = { .set_rate_and_parent = clk_pixel_set_rate_and_parent, .determine_rate = clk_pixel_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_pixel_ops); @@ -1210,6 +1219,7 @@ const struct clk_ops clk_dp_ops = { .set_rate_and_parent = clk_dp_set_rate_and_parent, .determine_rate = clk_dp_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_dp_ops); @@ -1301,6 +1311,7 @@ const struct clk_ops clk_gfx3d_ops = { .set_rate_and_parent = clk_gfx3d_set_rate_and_parent, .determine_rate = clk_gfx3d_determine_rate, .list_registers = clk_rcg2_list_registers, + .bus_vote = clk_debug_bus_vote, }; EXPORT_SYMBOL_GPL(clk_gfx3d_ops); diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index bf1271373d768c894542ad3978ae3b30866ea2ee..f8e89d4c47195d1d3f8ffffbc7d07c8d6d1d8f2c 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -372,6 +372,7 @@ static const struct of_device_id 
clk_rpmh_match_table[] = { { .compatible = "qcom,rpmh-clk-sdmshrike", .data = &clk_rpmh_sdmshrike}, { .compatible = "qcom,rpmh-clk-sm6150", .data = &clk_rpmh_sm6150}, { .compatible = "qcom,rpmh-clk-sdmmagpie", .data = &clk_rpmh_sm6150}, + { .compatible = "qcom,rpmh-clk-sdxprairie", .data = &clk_rpmh_sm8150}, { } }; MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); diff --git a/drivers/clk/qcom/dispcc-sdmmagpie.c b/drivers/clk/qcom/dispcc-sdmmagpie.c index 6f6cbe90eea4436ebad63e6eecd9a7977c8a343d..0e4095403746366ac509e322941b259805ccda16 100644 --- a/drivers/clk/qcom/dispcc-sdmmagpie.c +++ b/drivers/clk/qcom/dispcc-sdmmagpie.c @@ -104,7 +104,7 @@ static const struct parent_map disp_cc_parent_map_3[] = { static const char * const disp_cc_parent_names_3[] = { "bi_tcxo", "disp_cc_pll0", - "gpll0", + "gcc_disp_gpll0_clk_src", "disp_cc_pll0_out_even", "core_bi_pll_test_se", }; @@ -131,7 +131,7 @@ static const struct parent_map disp_cc_parent_map_5[] = { static const char * const disp_cc_parent_names_5[] = { "bi_tcxo", - "gpll0", + "gcc_disp_gpll0_clk_src", "core_bi_pll_test_se", }; @@ -195,6 +195,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { .name = "disp_cc_mdss_ahb_clk_src", .parent_names = disp_cc_parent_names_5, .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_cx, .num_rate_max = VDD_NUM, @@ -636,7 +637,7 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = { "disp_cc_mdss_byte0_div_clk_src", }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_GET_RATE_NOCACHE, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/dispcc-sm6150.c b/drivers/clk/qcom/dispcc-sm6150.c index 1f84e3add24d75ec08f100fbf5e58f5cb5cbc34c..54944ec026f67418fda2ee10aaa46c3c980e82a1 100644 --- a/drivers/clk/qcom/dispcc-sm6150.c +++ b/drivers/clk/qcom/dispcc-sm6150.c @@ -256,6 +256,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = { }; static const struct freq_tbl 
ftbl_disp_cc_mdss_dp_link_clk_src[] = { + F( 162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), F( 270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), F( 540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), { } diff --git a/drivers/clk/qcom/dispcc-sm8150.c b/drivers/clk/qcom/dispcc-sm8150.c index 1220c28e1164791c935372cb168dbb5b4871fb03..526cc5a78b680d20e23f9d532c505ae8afa3eb34 100644 --- a/drivers/clk/qcom/dispcc-sm8150.c +++ b/drivers/clk/qcom/dispcc-sm8150.c @@ -24,8 +24,10 @@ #include #include #include +#include #include +#include #include "common.h" #include "clk-regmap-divider.h" @@ -38,8 +40,40 @@ #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } +#define MSM_BUS_VECTOR(_src, _dst, _ab, _ib) \ +{ \ + .src = _src, \ + .dst = _dst, \ + .ab = _ab, \ + .ib = _ib, \ +} + static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); +static struct msm_bus_vectors clk_debugfs_vectors[] = { + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_DISPLAY_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_DISPLAY_CFG, 0, 1), +}; + +static struct msm_bus_paths clk_debugfs_usecases[] = { + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[0], + }, + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[1], + } +}; + +static struct msm_bus_scale_pdata clk_debugfs_scale_table = { + .usecase = clk_debugfs_usecases, + .num_usecases = ARRAY_SIZE(clk_debugfs_usecases), + .name = "clk_dispcc_debugfs", +}; + #define DISP_CC_MISC_CMD 0x8000 enum { @@ -1562,6 +1596,8 @@ static int disp_cc_sm8150_probe(struct platform_device *pdev) struct regmap *regmap; struct clk *clk; int ret = 0; + int i; + unsigned int dispcc_bus_id; regmap = qcom_cc_map(pdev, &disp_cc_sm8150_desc); if (IS_ERR(regmap)) { @@ -1584,6 +1620,18 @@ static int disp_cc_sm8150_probe(struct platform_device *pdev) "Unable to get vdd_mm regulator\n"); return PTR_ERR(vdd_mm.regulator[0]); } + vdd_mm.use_max_uV = true; + + dispcc_bus_id = msm_bus_scale_register_client(&clk_debugfs_scale_table); + if 
(!dispcc_bus_id) { + dev_err(&pdev->dev, "Unable to register for bw voting\n"); + return -EPROBE_DEFER; + } + for (i = 0; i < ARRAY_SIZE(disp_cc_sm8150_clocks); i++) + if (disp_cc_sm8150_clocks[i]) + *(unsigned int *)(void *) + &disp_cc_sm8150_clocks[i]->hw.init->bus_cl_id = + dispcc_bus_id; ret = disp_cc_sm8150_fixup(pdev, regmap); if (ret) diff --git a/drivers/clk/qcom/gcc-qcs405.c b/drivers/clk/qcom/gcc-qcs405.c index 3f6c4328a7e0b5c7383b9a88835cc79671817e53..c96da2882b2aef665ed4ad083fc634e2a26ea669 100644 --- a/drivers/clk/qcom/gcc-qcs405.c +++ b/drivers/clk/qcom/gcc-qcs405.c @@ -1044,12 +1044,12 @@ static struct clk_rcg2 hdmi_pclk_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = gcc_parent_map_8, - .freq_tbl = ftbl_esc0_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "hdmi_pclk_clk_src", .parent_names = gcc_parent_names_8, .num_parents = 3, - .ops = &clk_rcg2_ops, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, .vdd_class = &vdd_cx, .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { diff --git a/drivers/clk/qcom/gcc-sdxprairie.c b/drivers/clk/qcom/gcc-sdxprairie.c new file mode 100644 index 0000000000000000000000000000000000000000..bf3603272a8ee48990f19e66b92829ca88f93c74 --- /dev/null +++ b/drivers/clk/qcom/gcc-sdxprairie.c @@ -0,0 +1,1961 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include + +#include + +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "reset.h" +#include "clk-alpha-pll.h" +#include "vdd-level.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_EVEN, + P_GPLL0_OUT_MAIN, + P_GPLL4_OUT_EVEN, + P_GPLL5_OUT_MAIN, + P_SLEEP_CLK, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_0[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "core_bi_pll_test_se", +}; +static const char * const gcc_parent_names_0_ao[] = { + "bi_tcxo_ao", + "gpll0", + "gpll0_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_1[] = { + "bi_tcxo", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_EVEN, 2 }, + { P_GPLL5_OUT_MAIN, 5 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_2[] = { + "bi_tcxo", + "gpll0", + "gpll4_out_even", + "gpll5", + "gpll0_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_3[] = { + "bi_tcxo", + "gpll0", + "sleep_clk", 
+ "gpll0_out_even", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_4[] = { + "bi_tcxo", + "sleep_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_EVEN, 2 }, + { P_GPLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_5[] = { + "bi_tcxo", + "gpll0", + "gpll4_out_even", + "gpll0_out_even", + "core_bi_pll_test_se", +}; + +static struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static struct clk_alpha_pll gpll0 = { + .offset = 0x0, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .clkr = { + .enable_reg = 0x6d000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_lucid_even[] = { + { 0x0, 1 }, + { 0x1, 2 }, + { 0x3, 4 }, + { 0x7, 8 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_lucid_even, + .num_post_div = ARRAY_SIZE(post_div_table_lucid_even), + .width = 4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0_out_even", + .parent_names = (const char *[]){ "gpll0" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll4 = { + .offset = 0x76000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .clkr = { + .enable_reg = 0x6d000, + .enable_mask = 
BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gpll4", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll_postdiv gpll4_out_even = { + .offset = 0x76000, + .post_div_shift = 8, + .post_div_table = post_div_table_lucid_even, + .num_post_div = ARRAY_SIZE(post_div_table_lucid_even), + .width = 4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll4_out_even", + .parent_names = (const char *[]){ "gpll4" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll5 = { + .offset = 0x74000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .clkr = { + .enable_reg = 0x6d000, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gpll5", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x11024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = 
&vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 50000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = { + F(960000, P_BI_TCXO, 10, 1, 2), + F(4800000, P_BI_TCXO, 4, 0, 0), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2), + F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x1100c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 25000000, + [VDD_NOMINAL] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x13024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x1300c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct 
clk_init_data){ + .name = "gcc_blsp1_qup2_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 25000000, + [VDD_NOMINAL] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x15024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x1500c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 25000000, + [VDD_NOMINAL] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x17024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = 
&vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 50000000}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x1700c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 25000000, + [VDD_NOMINAL] = 50000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = { + F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625), + F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625), + F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2), + F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2), + F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2), + F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2), + F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2), + F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2), + F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5), + F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), + F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2), + F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2), + F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2), + F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75), + F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0), + F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375), + F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0), + F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375), + F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75), + F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0), + { } +}; + +static 
struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = { + .cmd_rcgr = 0x1200c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart1_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 48000000, + [VDD_NOMINAL] = 63157895}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = { + .cmd_rcgr = 0x1400c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart2_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 48000000, + [VDD_NOMINAL] = 63157895}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = { + .cmd_rcgr = 0x1600c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart3_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 48000000, + [VDD_NOMINAL] = 63157895}, + }, +}; + +static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = { + .cmd_rcgr = 0x1800c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = 
ftbl_gcc_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart4_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 48000000, + [VDD_NOMINAL] = 63157895}, + }, +}; + +static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { + .cmd_rcgr = 0x24010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk_src", + .parent_names = gcc_parent_names_0_ao, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx_ao, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { + .cmd_rcgr = 0x2402c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_rbcpr_clk_src", + .parent_names = gcc_parent_names_0_ao, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx_ao, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac_clk_src[] = { + F(2500000, P_BI_TCXO, 1, 25, 192), + F(5000000, P_BI_TCXO, 1, 25, 96), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac_clk_src = { + .cmd_rcgr = 0x47020, 
+ .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_emac_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 250000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(230400000, P_GPLL5_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac_ptp_clk_src = { + .cmd_rcgr = 0x47038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_emac_ptp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_ptp_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 230400000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x2b004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 
gcc_gp2_clk_src = { + .cmd_rcgr = 0x2c004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x2d004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = { + .cmd_rcgr = 0x37034, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_aux_phy_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_rchng_phy_clk_src[] = { + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = { + .cmd_rcgr = 0x37050, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_rchng_phy_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_rchng_phy_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 5, + .ops = 
&clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 100000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x19010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 9600000, + [VDD_LOWER] = 19200000, + [VDD_LOW] = 60000000}, + }, +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0xf00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000, + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = { + F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_master_clk_src = { + .cmd_rcgr = 0xb024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_master_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 
200000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk_src[] = { + F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = { + .cmd_rcgr = 0xb03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_mock_utmi_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 60000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = { + F(1000000, P_BI_TCXO, 1, 5, 96), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = { + .cmd_rcgr = 0xb064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_aux_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 19200000}, + }, +}; + +static struct clk_branch gcc_ahb_pcie_link_clk = { + .halt_reg = 0x22004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x22004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ahb_pcie_link_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x10004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { + .halt_reg = 0x11008, + .halt_check = 
BRANCH_HALT, + .clkr = { + .enable_reg = 0x11008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_i2c_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { + .halt_reg = 0x11004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_spi_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { + .halt_reg = 0x13008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_i2c_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { + .halt_reg = 0x13004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_spi_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { + .halt_reg = 0x15008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_i2c_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { + .halt_reg = 0x15004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x15004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_spi_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_i2c_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_spi_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart1_apps_clk = { + .halt_reg = 0x12004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart1_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart2_apps_clk", + .parent_names = (const char 
*[]){ + "gcc_blsp1_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart3_apps_clk = { + .halt_reg = 0x16004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart3_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_uart3_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart4_apps_clk = { + .halt_reg = 0x18004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x18004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart4_apps_clk", + .parent_names = (const char *[]){ + "gcc_blsp1_uart4_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x1c004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1c004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ce1_ahb_clk = { + .halt_reg = 0x2100c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2100c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ce1_axi_clk = { + .halt_reg = 0x21008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ce1_clk = { + .halt_reg = 0x21004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = 
{ + .enable_reg = 0x6d008, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_ahb_clk = { + .halt_reg = 0x24000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk", + .parent_names = (const char *[]){ + "gcc_cpuss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_gnoc_clk = { + .halt_reg = 0x24004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x24004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_gnoc_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_rbcpr_clk = { + .halt_reg = 0x24008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x24008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_rbcpr_clk", + .parent_names = (const char *[]){ + "gcc_cpuss_rbcpr_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_axi_clk = { + .halt_reg = 0x4701c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_ptp_clk = { + .halt_reg = 0x47018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x47018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_ptp_clk", + .parent_names = (const char *[]){ + "gcc_emac_ptp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static 
struct clk_branch gcc_eth_rgmii_clk = { + .halt_reg = 0x47010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x47010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_rgmii_clk", + .parent_names = (const char *[]){ + "gcc_emac_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_slave_ahb_clk = { + .halt_reg = 0x47014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x47014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_slave_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x2b000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2b000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_names = (const char *[]){ + "gcc_gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x2c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_names = (const char *[]){ + "gcc_gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x2d000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2d000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_names = (const char *[]){ + "gcc_gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_clkref_clk = { + .halt_reg = 0x88004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x88004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + 
.name = "gcc_pcie_0_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_aux_clk = { + .halt_reg = 0x37024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_aux_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_cfg_ahb_clk = { + .halt_reg = 0x3701c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_mstr_axi_clk = { + .halt_reg = 0x37018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_pipe_clk = { + .halt_reg = 0x3702c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_rchng_phy_clk = { + .halt_reg = 0x37020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_rchng_phy_clk", + .parent_names = (const char *[]){ + "gcc_pcie_rchng_phy_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_sleep_clk = { + .halt_reg = 0x37028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_sleep_clk", + .parent_names = (const char *[]){ + "gcc_pcie_aux_phy_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_slv_axi_clk = { + .halt_reg = 0x37014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x37014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_slv_q2a_axi_clk = { + .halt_reg = 0x37010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d010, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x1900c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1900c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_names = (const char *[]){ + "gcc_pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x19004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x19004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x19004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x19008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x19008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0xf008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0xf004, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0xf004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]){ + "gcc_sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { + .halt_reg = 0x4010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x6d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_cpuss_ahb_clk", + .parent_names = (const char *[]){ + "gcc_cpuss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_master_clk = { + .halt_reg = 0xb010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_master_clk", + .parent_names = (const char *[]){ + "gcc_usb30_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mock_utmi_clk = { + .halt_reg = 0xb020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_mock_utmi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mstr_axi_clk = { + .halt_reg = 0xb014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sleep_clk = { + .halt_reg = 0xb01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb01c, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_usb30_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_slv_ahb_clk = { + .halt_reg = 0xb018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_aux_clk = { + .halt_reg = 0xb058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_usb3_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_pipe_clk = { + .halt_reg = 0xb05c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb05c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_clkref_clk = { + .halt_reg = 0x88000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x88000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = { + .halt_reg = 0xe004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xe004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xe004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_phy_cfg_ahb2phy_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_xo_pcie_link_clk = { + .halt_reg = 0x22008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x22008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_xo_pcie_link_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* Measure-only clock for ddrss_gcc_debug_clk. 
*/ +static struct clk_dummy measure_only_bimc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_bimc_clk", + .ops = &clk_dummy_ops, + }, +}; + +/* Measure-only clock for gcc_ipa_2x_clk. */ +static struct clk_dummy measure_only_ipa_2x_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_ipa_2x_clk", + .ops = &clk_dummy_ops, + }, +}; + +/* Measure-only clock for gcc_sys_noc_axi_clk. */ +static struct clk_dummy measure_only_snoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_snoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +struct clk_hw *gcc_sdxprairie_hws[] = { + [MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw, + [MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw, + [MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw, +}; + +static struct clk_regmap *gcc_sdxprairie_clocks[] = { + [GCC_AHB_PCIE_LINK_CLK] = &gcc_ahb_pcie_link_clk.clkr, + [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] = + &gcc_blsp1_qup1_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = + &gcc_blsp1_qup1_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] = + &gcc_blsp1_qup2_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = + &gcc_blsp1_qup2_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] = + &gcc_blsp1_qup3_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = + &gcc_blsp1_qup3_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, + 
[GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] = + &gcc_blsp1_qup4_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] = + &gcc_blsp1_qup4_spi_apps_clk_src.clkr, + [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr, + [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr, + [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr, + [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr, + [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr, + [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr, + [GCC_CE1_CLK] = &gcc_ce1_clk.clkr, + [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, + [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, + [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr, + [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, + [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr, + [GCC_EMAC_CLK_SRC] = &gcc_emac_clk_src.clkr, + [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr, + [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr, + [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr, + [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr, + [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr, + [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr, + [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr, + [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr, + 
[GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr, + [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr, + [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr, + [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr, + [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr, + [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr, + [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, + [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr, + [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr, + [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr, + [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr, + [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr, + [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr, + [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr, + [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr, + [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr, + [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr, + [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr, + [GCC_XO_PCIE_LINK_CLK] = &gcc_xo_pcie_link_clk.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr, + [GPLL4] = &gpll4.clkr, + [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr, + [GPLL5] = &gpll5.clkr, +}; + +static const struct qcom_reset_map gcc_sdxprairie_resets[] = { + [GCC_EMAC_BCR] = { 0x47000 }, + [GCC_PCIE_BCR] = { 0x37000 }, + [GCC_PCIE_LINK_DOWN_BCR] = { 0x77000 }, + [GCC_PCIE_PHY_BCR] = { 0x39000 }, + [GCC_PCIE_PHY_COM_BCR] = { 0x78004 }, + 
[GCC_QUSB2PHY_BCR] = { 0xd000 }, + [GCC_USB30_BCR] = { 0xb000 }, + [GCC_USB3_PHY_BCR] = { 0xc000 }, + [GCC_USB3PHY_PHY_BCR] = { 0xc004 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0xe000 }, +}; + +static const struct regmap_config gcc_sdxprairie_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9b040, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sdxprairie_desc = { + .config = &gcc_sdxprairie_regmap_config, + .clks = gcc_sdxprairie_clocks, + .num_clks = ARRAY_SIZE(gcc_sdxprairie_clocks), + .hwclks = gcc_sdxprairie_hws, + .num_hwclks = ARRAY_SIZE(gcc_sdxprairie_hws), + .resets = gcc_sdxprairie_resets, + .num_resets = ARRAY_SIZE(gcc_sdxprairie_resets), +}; + +static const struct of_device_id gcc_sdxprairie_match_table[] = { + { .compatible = "qcom,gcc-sdxprairie" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sdxprairie_match_table); + +static int gcc_sdxprairie_probe(struct platform_device *pdev) +{ + struct clk *clk; + struct device *dev = &pdev->dev; + int ret = 0; + + clk = devm_clk_get(dev, "bi_tcxo"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Unable to get cxo clock\n"); + return PTR_ERR(clk); + } + + vdd_cx.regulator[0] = devm_regulator_get(dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(dev, "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao"); + if (IS_ERR(vdd_cx_ao.regulator[0])) { + if (!(PTR_ERR(vdd_cx_ao.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx_ao regulator\n"); + return PTR_ERR(vdd_cx_ao.regulator[0]); + } + + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, "Unable to get vdd_mx regulator\n"); + return PTR_ERR(vdd_mx.regulator[0]); + } + + 
ret = qcom_cc_probe(pdev, &gcc_sdxprairie_desc); + if (ret) + dev_err(&pdev->dev, "Failed to register GCC clocks\n"); + + return ret; +} + +static struct platform_driver gcc_sdxprairie_driver = { + .probe = gcc_sdxprairie_probe, + .driver = { + .name = "gcc-sdxprairie", + .of_match_table = gcc_sdxprairie_match_table, + }, +}; + +static int __init gcc_sdxprairie_init(void) +{ + return platform_driver_register(&gcc_sdxprairie_driver); +} +subsys_initcall(gcc_sdxprairie_init); + +static void __exit gcc_sdxprairie_exit(void) +{ + platform_driver_unregister(&gcc_sdxprairie_driver); +} +module_exit(gcc_sdxprairie_exit); + +MODULE_DESCRIPTION("QTI GCC SDXPRAIRIE Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:gcc-sdxprairie"); diff --git a/drivers/clk/qcom/gcc-sm6150.c b/drivers/clk/qcom/gcc-sm6150.c index c62f870b57eb9b2834273d318930937a9ad9d073..2c5c72f5c5c4e384c43e1b5c23943fe887387cbf 100644 --- a/drivers/clk/qcom/gcc-sm6150.c +++ b/drivers/clk/qcom/gcc-sm6150.c @@ -299,6 +299,8 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { }; static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0), F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0), F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0), @@ -325,12 +327,24 @@ static struct clk_rcg2 gcc_emac_ptp_clk_src = { }, }; +static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = { + F(2500000, P_BI_TCXO, 1, 25, 192), + F(5000000, P_BI_TCXO, 1, 25, 96), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0), + F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0), + F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), + F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + static struct clk_rcg2 gcc_emac_rgmii_clk_src = { .cmd_rcgr = 0x601c, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_parent_map_6, - .freq_tbl = ftbl_gcc_emac_ptp_clk_src, + .freq_tbl = 
ftbl_gcc_emac_rgmii_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_emac_rgmii_clk_src", .parent_names = gcc_parent_names_6, @@ -1088,6 +1102,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { }; static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), F(20000000, P_GPLL0_OUT_AUX2, 15, 0, 0), F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0), F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), @@ -2720,6 +2735,7 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { }; static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_reg = 0x7701c, .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x7701c, @@ -2732,6 +2748,7 @@ static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { }; static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_reg = 0x77018, .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x77018, @@ -3164,6 +3181,19 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = { }, }; +static struct clk_branch gcc_rx3_usb2_clkref_clk = { + .halt_reg = 0x8c038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8c038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_rx3_usb2_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_usb2_prim_clkref_clk = { .halt_reg = 0x8c028, .halt_check = BRANCH_HALT_VOTED, @@ -3407,6 +3437,7 @@ static struct clk_regmap *gcc_sm6150_clocks[] = { [GPLL7_OUT_MAIN] = &gpll7_out_main.clkr, [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr, [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr, + [GCC_RX3_USB2_CLKREF_CLK] = &gcc_rx3_usb2_clkref_clk.clkr, [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr, [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr, }; diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 8edc1364458336bf0815f122ba863e7243f6496d..f5321640b84a88023fa7ec5597ae28366ae94e72 100644 --- a/drivers/clk/qcom/gcc-sm8150.c 
+++ b/drivers/clk/qcom/gcc-sm8150.c @@ -4259,6 +4259,7 @@ static int gcc_sm8150_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Unable to get vdd_mm regulator\n"); return PTR_ERR(vdd_mm.regulator[0]); } + vdd_mm.use_max_uV = true; /* register hardware clocks */ for (i = 0; i < ARRAY_SIZE(gcc_sm8150_hws); i++) { diff --git a/drivers/clk/qcom/gpucc-sdmmagpie.c b/drivers/clk/qcom/gpucc-sdmmagpie.c index b2d18f94ddaa877c3593e360fb77434fd102a6f4..910da9fd166142b8417a199c17b31adba6457e39 100644 --- a/drivers/clk/qcom/gpucc-sdmmagpie.c +++ b/drivers/clk/qcom/gpucc-sdmmagpie.c @@ -83,6 +83,7 @@ enum { P_GPU_CC_PLL1_OUT_EVEN, P_GPU_CC_PLL1_OUT_MAIN, P_GPU_CC_PLL1_OUT_ODD, + P_CRC_DIV, }; static const struct parent_map gpu_cc_parent_map_0[] = { @@ -123,6 +124,25 @@ static const char * const gpu_cc_parent_names_1[] = { "core_bi_pll_test_se", }; +static const struct parent_map gpu_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CRC_DIV, 1 }, + { P_GPU_CC_PLL0_OUT_ODD, 2 }, + { P_GPU_CC_PLL1_OUT_EVEN, 3 }, + { P_GPU_CC_PLL1_OUT_ODD, 4 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpu_cc_parent_names_2[] = { + "bi_tcxo", + "crc_div", + "gpu_cc_pll0_out_odd", + "gpu_cc_pll1_out_even", + "gpu_cc_pll1_out_odd", + "gcc_gpu_gpll0_clk_src", + "core_bi_pll_test_se", +}; static struct pll_vco fabia_vco[] = { { 249600000, 2000000000, 0 }, { 125000000, 1000000000, 1 }, @@ -183,6 +203,54 @@ static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = { }, }; +static struct clk_fixed_factor crc_div = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data){ + .name = "crc_div", + .parent_names = (const char *[]){ "gpu_cc_pll0_out_even" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x100, + .vco_table = fabia_vco, + .num_vco = ARRAY_SIZE(fabia_vco), + .type = FABIA_PLL, + .clkr = { + .hw.init = &(struct clk_init_data){ + 
.name = "gpu_cc_pll1", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_fabia_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1600000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_even = { + .offset = 0x100, + .post_div_shift = 8, + .post_div_table = post_div_table_fabia_even, + .num_post_div = ARRAY_SIZE(post_div_table_fabia_even), + .width = 4, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1_out_even", + .parent_names = (const char *[]){ "gpu_cc_pll1" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_generic_pll_postdiv_ops, + }, +}; static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0), @@ -208,16 +276,15 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { }, }; -/* PLL would be 2 times. 
*/ static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { - F(180000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(267000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(355000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(430000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(565000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(650000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(750000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), - F(780000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(180000000, P_CRC_DIV, 1, 0, 0), + F(267000000, P_CRC_DIV, 1, 0, 0), + F(355000000, P_CRC_DIV, 1, 0, 0), + F(430000000, P_CRC_DIV, 1, 0, 0), + F(565000000, P_CRC_DIV, 1, 0, 0), + F(650000000, P_CRC_DIV, 1, 0, 0), + F(800000000, P_CRC_DIV, 1, 0, 0), + F(825000000, P_CRC_DIV, 1, 0, 0), { } }; @@ -225,12 +292,12 @@ static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { .cmd_rcgr = 0x101c, .mnd_width = 0, .hid_width = 5, - .parent_map = gpu_cc_parent_map_1, + .parent_map = gpu_cc_parent_map_2, .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src, .flags = FORCE_ENABLE_RCG, .clkr.hw.init = &(struct clk_init_data){ .name = "gpu_cc_gx_gfx3d_clk_src", - .parent_names = gpu_cc_parent_names_1, + .parent_names = gpu_cc_parent_names_2, .num_parents = 7, .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, @@ -243,8 +310,8 @@ static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { [VDD_GX_LOW_L1] = 430000000, [VDD_GX_NOMINAL] = 565000000, [VDD_GX_NOMINAL_L1] = 650000000, - [VDD_GX_HIGH] = 750000000, - [VDD_GX_HIGH_L1] = 780000000}, + [VDD_GX_HIGH] = 800000000, + [VDD_GX_HIGH_L1] = 825000000}, }, }; @@ -276,12 +343,13 @@ static struct clk_branch gpu_cc_acd_cxo_clk = { static struct clk_branch gpu_cc_ahb_clk = { .halt_reg = 0x1078, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x1078, .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gpu_cc_ahb_clk", + .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -421,7 +489,7 @@ static struct clk_branch gpu_cc_gx_cxo_clk = { static struct 
clk_branch gpu_cc_gx_gfx3d_clk = { .halt_reg = 0x1054, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x1054, .enable_mask = BIT(0), @@ -488,6 +556,8 @@ static struct clk_regmap *gpu_cc_sdmmagpie_clocks[] = { [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr, [GPU_CC_PLL0] = &gpu_cc_pll0.clkr, [GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_PLL1_OUT_EVEN] = &gpu_cc_pll1_out_even.clkr, }; static const struct regmap_config gpu_cc_sdmmagpie_regmap_config = { @@ -545,6 +615,13 @@ static int gpu_cc_sdmmagpie_probe(struct platform_device *pdev) /* Avoid turning on the rail during clock registration */ vdd_gx.skip_handoff = true; + /* Register clock fixed factor for CRC divide. */ + ret = devm_clk_hw_register(&pdev->dev, &crc_div.hw); + if (ret) { + dev_err(&pdev->dev, "Failed to register hardware clock\n"); + return ret; + } + regmap = qcom_cc_map(pdev, &gpu_cc_sdmmagpie_desc); if (IS_ERR(regmap)) { pr_err("Failed to map the gpu_cc registers\n"); @@ -552,6 +629,7 @@ static int gpu_cc_sdmmagpie_probe(struct platform_device *pdev) } clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll0_config); ret = qcom_cc_really_probe(pdev, &gpu_cc_sdmmagpie_desc, regmap); if (ret) { diff --git a/drivers/clk/qcom/gpucc-sm6150.c b/drivers/clk/qcom/gpucc-sm6150.c index 84191466c5a9eea355555ca96e7f3e34a837e697..f56e68620ffe5e3084c3739bd4120ea3678b7cb7 100644 --- a/drivers/clk/qcom/gpucc-sm6150.c +++ b/drivers/clk/qcom/gpucc-sm6150.c @@ -37,6 +37,8 @@ #define CX_GMU_CBCR_SLEEP_SHIFT 4 #define CX_GMU_CBCR_WAKE_MASK 0xf #define CX_GMU_CBCR_WAKE_SHIFT 8 +#define GFX3D_CRC_SID_FSM_CTRL 0x1024 +#define GFX3D_CRC_MND_CFG 0x1028 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } @@ -49,10 +51,10 @@ enum { P_GPLL0_OUT_MAIN, P_GPLL0_OUT_MAIN_DIV, P_GPU_CC_PLL0_2X_CLK, - P_GPU_CC_PLL0_OUT_AUX2, + P_CRC_DIV_PLL0_OUT_AUX2, 
P_GPU_CC_PLL0_OUT_MAIN, P_GPU_CC_PLL1_OUT_AUX, - P_GPU_CC_PLL1_OUT_AUX2, + P_CRC_DIV_PLL1_OUT_AUX2, P_GPU_CC_PLL1_OUT_MAIN, }; @@ -77,9 +79,9 @@ static const char * const gpu_cc_parent_names_0[] = { static const struct parent_map gpu_cc_parent_map_1[] = { { P_BI_TCXO, 0 }, { P_GPU_CC_PLL0_2X_CLK, 1 }, - { P_GPU_CC_PLL0_OUT_AUX2, 2 }, + { P_CRC_DIV_PLL0_OUT_AUX2, 2 }, { P_GPU_CC_PLL1_OUT_AUX, 3 }, - { P_GPU_CC_PLL1_OUT_AUX2, 4 }, + { P_CRC_DIV_PLL1_OUT_AUX2, 4 }, { P_GPLL0_OUT_MAIN, 5 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; @@ -87,9 +89,9 @@ static const struct parent_map gpu_cc_parent_map_1[] = { static const char * const gpu_cc_parent_names_1[] = { "bi_tcxo", "gpu_cc_pll0_out_aux", - "gpu_cc_pll0_out_aux2", + "crc_div_pll0_out_aux2", "gpu_cc_pll1_out_aux", - "gpu_cc_pll1_out_aux2", + "crc_div_pll1_out_aux2", "gpll0_out_main", "core_bi_pll_test_se", }; @@ -189,14 +191,38 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { }, }; +static struct clk_fixed_factor crc_div_pll0_out_aux2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "crc_div_pll0_out_aux2", + .parent_names = (const char *[]){ "gpu_cc_pll0_out_aux2" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor crc_div_pll1_out_aux2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "crc_div_pll1_out_aux2", + .parent_names = (const char *[]){ "gpu_cc_pll1_out_aux2" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { - F(290000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0), - F(435000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0), - F(550000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), - F(700000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), - F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), - F(845000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), - F(895000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + F(290000000, 
P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0), + F(435000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0), + F(550000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0), + F(700000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0), + F(745000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0), + F(845000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0), + F(895000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0), { } }; @@ -408,6 +434,11 @@ static struct clk_branch gpu_cc_ahb_clk = { }, }; +struct clk_hw *gpu_cc_sm6150_hws[] = { + [CRC_DIV_PLL0_OUT_AUX2] = &crc_div_pll0_out_aux2.hw, + [CRC_DIV_PLL1_OUT_AUX2] = &crc_div_pll1_out_aux2.hw, +}; + static struct clk_regmap *gpu_cc_sm6150_clocks[] = { [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr, @@ -439,6 +470,8 @@ static const struct qcom_cc_desc gpu_cc_sm6150_desc = { .config = &gpu_cc_sm6150_regmap_config, .clks = gpu_cc_sm6150_clocks, .num_clks = ARRAY_SIZE(gpu_cc_sm6150_clocks), + .hwclks = gpu_cc_sm6150_hws, + .num_hwclks = ARRAY_SIZE(gpu_cc_sm6150_hws), }; static const struct of_device_id gpu_cc_sm6150_match_table[] = { @@ -495,6 +528,14 @@ static int gpu_cc_sm6150_probe(struct platform_device *pdev) regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, mask, value); + /* After POR, Clock Ramp Controller (CRC) will be in bypass mode. + * Software needs to do the following operation to enable the CRC + * for GFX3D clock and divide the input clock by 2.
+ */ + regmap_update_bits(regmap, GFX3D_CRC_MND_CFG, 0x00015011, 0x00015011); + regmap_update_bits(regmap, + GFX3D_CRC_SID_FSM_CTRL, 0x00800000, 0x00800000); + dev_info(&pdev->dev, "Registered GPU CC clocks\n"); return ret; diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile index 36e8ef88e92dc45a641e444d49d86ed01de1d203..28a987330133b500e473a4aa941ef89de2e02ec3 100644 --- a/drivers/clk/qcom/mdss/Makefile +++ b/drivers/clk/qcom/mdss/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28nm-util.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-14nm.o +obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-hdmi-pll-28lpm.o diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c index d013069acd4e76907d946f378cd465479b9ca866..00f189faa75517035a88a12a780bc4ddc432d170 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c @@ -1124,8 +1124,10 @@ static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw, u32 outdiv; u64 pll_freq, tmp64; - if (!vco->priv) + if (!vco->priv) { pr_err("vco priv is null\n"); + return 0; + } /* * Calculate the vco rate from HW registers only for handoff cases. diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c new file mode 100644 index 0000000000000000000000000000000000000000..c9fd4175444b1113ac7fcd0ea66da1abd9b0444b --- /dev/null +++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28lpm.c @@ -0,0 +1,782 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include "mdss-pll.h" +#include "mdss-hdmi-pll.h" + +/* HDMI PLL macros */ +#define HDMI_PHY_PLL_REFCLK_CFG (0x0400) +#define HDMI_PHY_PLL_CHRG_PUMP_CFG (0x0404) +#define HDMI_PHY_PLL_LOOP_FLT_CFG0 (0x0408) +#define HDMI_PHY_PLL_LOOP_FLT_CFG1 (0x040c) +#define HDMI_PHY_PLL_IDAC_ADJ_CFG (0x0410) +#define HDMI_PHY_PLL_I_VI_KVCO_CFG (0x0414) +#define HDMI_PHY_PLL_PWRDN_B (0x0418) +#define HDMI_PHY_PLL_SDM_CFG0 (0x041c) +#define HDMI_PHY_PLL_SDM_CFG1 (0x0420) +#define HDMI_PHY_PLL_SDM_CFG2 (0x0424) +#define HDMI_PHY_PLL_SDM_CFG3 (0x0428) +#define HDMI_PHY_PLL_SDM_CFG4 (0x042c) +#define HDMI_PHY_PLL_SSC_CFG0 (0x0430) +#define HDMI_PHY_PLL_SSC_CFG1 (0x0434) +#define HDMI_PHY_PLL_SSC_CFG2 (0x0438) +#define HDMI_PHY_PLL_SSC_CFG3 (0x043c) +#define HDMI_PHY_PLL_LOCKDET_CFG0 (0x0440) +#define HDMI_PHY_PLL_LOCKDET_CFG1 (0x0444) +#define HDMI_PHY_PLL_LOCKDET_CFG2 (0x0448) +#define HDMI_PHY_PLL_VCOCAL_CFG0 (0x044c) +#define HDMI_PHY_PLL_VCOCAL_CFG1 (0x0450) +#define HDMI_PHY_PLL_VCOCAL_CFG2 (0x0454) +#define HDMI_PHY_PLL_VCOCAL_CFG3 (0x0458) +#define HDMI_PHY_PLL_VCOCAL_CFG4 (0x045c) +#define HDMI_PHY_PLL_VCOCAL_CFG5 (0x0460) +#define HDMI_PHY_PLL_VCOCAL_CFG6 (0x0464) +#define HDMI_PHY_PLL_VCOCAL_CFG7 (0x0468) +#define HDMI_PHY_PLL_DEBUG_SEL (0x046c) +#define HDMI_PHY_PLL_MISC0 (0x0470) +#define HDMI_PHY_PLL_MISC1 (0x0474) +#define HDMI_PHY_PLL_MISC2 (0x0478) +#define HDMI_PHY_PLL_MISC3 (0x047c) +#define HDMI_PHY_PLL_MISC4 (0x0480) +#define HDMI_PHY_PLL_MISC5 (0x0484) +#define HDMI_PHY_PLL_MISC6 (0x0488) +#define HDMI_PHY_PLL_DEBUG_BUS0 (0x048c) +#define HDMI_PHY_PLL_DEBUG_BUS1 (0x0490) +#define HDMI_PHY_PLL_DEBUG_BUS2 
(0x0494) +#define HDMI_PHY_PLL_STATUS0 (0x0498) +#define HDMI_PHY_PLL_STATUS1 (0x049c) + +#define HDMI_PHY_REG_0 (0x0000) +#define HDMI_PHY_REG_1 (0x0004) +#define HDMI_PHY_REG_2 (0x0008) +#define HDMI_PHY_REG_3 (0x000c) +#define HDMI_PHY_REG_4 (0x0010) +#define HDMI_PHY_REG_5 (0x0014) +#define HDMI_PHY_REG_6 (0x0018) +#define HDMI_PHY_REG_7 (0x001c) +#define HDMI_PHY_REG_8 (0x0020) +#define HDMI_PHY_REG_9 (0x0024) +#define HDMI_PHY_REG_10 (0x0028) +#define HDMI_PHY_REG_11 (0x002c) +#define HDMI_PHY_REG_12 (0x0030) +#define HDMI_PHY_REG_BIST_CFG (0x0034) +#define HDMI_PHY_DEBUG_BUS_SEL (0x0038) +#define HDMI_PHY_REG_MISC0 (0x003c) +#define HDMI_PHY_REG_13 (0x0040) +#define HDMI_PHY_REG_14 (0x0044) +#define HDMI_PHY_REG_15 (0x0048) + +/* HDMI PHY/PLL bit field macros */ +#define SW_RESET BIT(2) +#define SW_RESET_PLL BIT(0) +#define PWRDN_B BIT(7) + +#define PLL_PWRDN_B BIT(3) +#define REG_VTEST_EN BIT(2) +#define PD_PLL BIT(1) +#define PD_PLL_REG BIT(0) + + +#define HDMI_PLL_POLL_DELAY_US 50 +#define HDMI_PLL_POLL_TIMEOUT_US 500 + +static int hdmi_pll_lock_status(struct mdss_pll_resources *hdmi_pll_res) +{ + u32 status; + int pll_locked = 0; + int rc; + + rc = mdss_pll_resource_enable(hdmi_pll_res, true); + if (rc) { + pr_err("pll resource can't be enabled\n"); + return rc; + } + + /* poll for PLL ready status */ + if (readl_poll_timeout_atomic( + (hdmi_pll_res->pll_base + HDMI_PHY_PLL_STATUS0), + status, ((status & BIT(0)) == 1), + HDMI_PLL_POLL_DELAY_US, + HDMI_PLL_POLL_TIMEOUT_US)) { + pr_debug("HDMI PLL status=%x failed to Lock\n", status); + pll_locked = 0; + } else { + pr_debug("HDMI PLL locked\n"); + pll_locked = 1; + } + mdss_pll_resource_enable(hdmi_pll_res, false); + + return pll_locked; +} + +static void hdmi_pll_disable_28lpm(struct clk_hw *hw) +{ + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_pll_res = vco->priv; + u32 val; + + if (!hdmi_pll_res) { + pr_err("Invalid input parameter\n"); + return; + } + + val = 
MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_PHY_REG_12); + val &= (~PWRDN_B); + MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_PHY_REG_12, val); + + val = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_PHY_PLL_PWRDN_B); + val |= PD_PLL; + val &= (~PLL_PWRDN_B); + MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_PHY_PLL_PWRDN_B, val); + + /* Make sure HDMI PHY/PLL are powered down */ + wmb(); + +} /* hdmi_pll_disable_28lpm */ + +static int hdmi_pll_enable_28lpm(struct clk_hw *hw) +{ + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_pll_res = vco->priv; + void __iomem *pll_base; + u32 val; + int pll_lock_retry = 10; + + pll_base = hdmi_pll_res->pll_base; + + /* Assert PLL S/W reset */ + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x8d); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG0, 0x10); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG1, 0x1a); + udelay(10); + /* De-assert PLL S/W reset */ + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x0d); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_1, 0xf2); + + udelay(10); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_2, 0x1f); + + val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_REG_12); + val |= BIT(5); + /* Assert PHY S/W reset */ + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val); + val &= ~BIT(5); + udelay(10); + /* De-assert PHY S/W reset */ + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val); + + val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_REG_12); + val |= PWRDN_B; + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_12, val); + + /* Wait 10 us for enabling global power for PHY */ + wmb(); + udelay(10); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_3, 0x20); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_4, 0x10); + + val = MDSS_PLL_REG_R(pll_base, HDMI_PHY_PLL_PWRDN_B); + val |= PLL_PWRDN_B; + val |= REG_VTEST_EN; + val &= ~PD_PLL; + val |= PD_PLL_REG; + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_PWRDN_B, val); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_REG_2, 0x81); + + do { + if (!hdmi_pll_lock_status(hdmi_pll_res)) { 
+ /* PLL has still not locked. + * Do a software reset and try again + * Assert PLL S/W reset first + */ + MDSS_PLL_REG_W(pll_base, + HDMI_PHY_PLL_LOCKDET_CFG2, 0x8d); + + /* Wait for a short time before de-asserting + * to allow the hardware to complete its job. + * This much of delay should be fine for hardware + * to assert and de-assert. + */ + udelay(10); + MDSS_PLL_REG_W(pll_base, + HDMI_PHY_PLL_LOCKDET_CFG2, 0xd); + + /* Wait for a short duration for the PLL calibration + * before checking if the PLL gets locked + */ + udelay(350); + } else { + pr_debug("HDMI PLL locked\n"); + break; + } + + } while (--pll_lock_retry); + + if (!pll_lock_retry) { + pr_err("HDMI PLL not locked\n"); + hdmi_pll_disable_28lpm(hw); + return -EAGAIN; + } + + return 0; +} /* hdmi_pll_enable_28lpm */ + +static void hdmi_phy_pll_calculator_28lpm(unsigned long vco_rate, + struct mdss_pll_resources *hdmi_pll_res) +{ + u32 ref_clk = 19200000; + u32 integer_mode = 0; + u32 ref_clk_multiplier = integer_mode == 0 ? 2 : 1; + u32 int_ref_clk_freq = ref_clk * ref_clk_multiplier; + u32 refclk_cfg = 0; + u32 ten_power_six = 1000000; + u64 multiplier_q = 0; + u64 multiplier_r = 0; + u32 lf_cfg0 = 0; + u32 lf_cfg1 = 0; + u64 vco_cfg0 = 0; + u64 vco_cfg4 = 0; + u64 sdm_cfg0 = 0; + u64 sdm_cfg1 = 0; + u64 sdm_cfg2 = 0; + u32 val1 = 0; + u32 val2 = 0; + u32 val3 = 0; + void __iomem *pll_base = hdmi_pll_res->pll_base; + + multiplier_q = vco_rate; + multiplier_r = do_div(multiplier_q, int_ref_clk_freq); + + lf_cfg0 = multiplier_q > 30 ? 0 : (multiplier_q > 16 ? 16 : 32); + lf_cfg0 += integer_mode; + + lf_cfg1 = multiplier_q > 30 ? 0xc3 : (multiplier_q > 16 ? 
0xbb : 0xf9); + + vco_cfg0 = vco_rate / ten_power_six; + vco_cfg4 = ((ref_clk * 5) / ten_power_six) - 1; + + sdm_cfg0 = (integer_mode * 64) + multiplier_q - 1; + sdm_cfg1 = 64 + multiplier_q - 1; + + sdm_cfg2 = (multiplier_r) * 65536; + do_div(sdm_cfg2, int_ref_clk_freq); + + pr_debug("lf_cfg0 = 0x%x lf_cfg1 = 0x%x\n", lf_cfg0, lf_cfg1); + pr_debug("vco_cfg0 = 0x%x vco_cfg4 = 0x%x\n", vco_cfg0, vco_cfg4); + pr_debug("sdm_cfg0 = 0x%x sdm_cfg1 = 0x%x sdm_cfg2 = 0x%x\n", + sdm_cfg0, sdm_cfg1, sdm_cfg2); + + refclk_cfg = MDSS_PLL_REG_R(pll_base, HDMI_PHY_PLL_REFCLK_CFG); + refclk_cfg &= ~0xf; + refclk_cfg |= (ref_clk_multiplier == 2) ? 0x8 + : (ref_clk_multiplier == 1) ? 0 : 0x2; + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_REFCLK_CFG, refclk_cfg); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_CHRG_PUMP_CFG, 0x02); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOOP_FLT_CFG0, lf_cfg0); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOOP_FLT_CFG1, lf_cfg1); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_IDAC_ADJ_CFG, 0x2c); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_I_VI_KVCO_CFG, 0x06); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_PWRDN_B, 0x0a); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG0, sdm_cfg0); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG1, sdm_cfg1); + + val1 = sdm_cfg2 & 0xff; + val2 = (sdm_cfg2 >> 8) & 0xff; + val3 = (sdm_cfg2 >> 16) & 0xff; + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG2, val1); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG3, val2); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SDM_CFG4, val3); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG0, 0x9a); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG1, 0x00); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG2, 0x00); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_SSC_CFG3, 0x00); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG0, 0x10); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG1, 0x1a); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_LOCKDET_CFG2, 0x0d); + + val1 = vco_cfg0 & 0xff; + val2 = (vco_cfg0 >> 8) & 0xff; + 
MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG0, val1); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG1, val2); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG2, 0x3b); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG3, 0x00); + + val1 = vco_cfg4 & 0xff; + val2 = (vco_cfg4 >> 8) & 0xff; + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG4, val1); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG5, val2); + + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG6, 0x33); + MDSS_PLL_REG_W(pll_base, HDMI_PHY_PLL_VCOCAL_CFG7, 0x03); + +} + +int hdmi_vco_set_rate_28lpm(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_pll_res = vco->priv; + void __iomem *pll_base; + int rc; + + rc = mdss_pll_resource_enable(hdmi_pll_res, true); + if (rc) { + pr_err("pll resource can't be enabled\n"); + return rc; + } + + if (hdmi_pll_res->pll_on) + return 0; + + pll_base = hdmi_pll_res->pll_base; + + pr_debug("rate=%ld\n", rate); + + hdmi_phy_pll_calculator_28lpm(rate, hdmi_pll_res); + + /* Make sure writes complete before disabling iface clock */ + wmb(); + + vco->rate = rate; + hdmi_pll_res->vco_current_rate = rate; + + mdss_pll_resource_enable(hdmi_pll_res, false); + + + return 0; +} /* hdmi_pll_set_rate */ + +static unsigned long hdmi_vco_get_rate(struct hdmi_pll_vco_clk *vco) +{ + unsigned long freq = 0; + int rc = 0; + struct mdss_pll_resources *hdmi_pll_res = vco->priv; + + rc = mdss_pll_resource_enable(hdmi_pll_res, true); + if (rc) { + pr_err("Failed to enable hdmi pll resources\n"); + return 0; + } + + freq = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_VCOCAL_CFG1) << 8 | + MDSS_PLL_REG_R(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_VCOCAL_CFG0); + + switch (freq) { + case 742: + freq = 742500000; + break; + case 810: + if (MDSS_PLL_REG_R(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_SDM_CFG3) == 0x18) + freq = 810000000; + else + freq = 810900000; + break; + 
case 1342: + freq = 1342500000; + break; + default: + freq *= 1000000; + } + mdss_pll_resource_enable(hdmi_pll_res, false); + + return freq; +} + +long hdmi_vco_round_rate_28lpm(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long rrate = rate; + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + + if (rate < vco->min_rate) + rrate = vco->min_rate; + if (rate > vco->max_rate) + rrate = vco->max_rate; + + *parent_rate = rrate; + pr_debug("rrate=%ld\n", rrate); + + return rrate; +} + +int hdmi_vco_prepare_28lpm(struct clk_hw *hw) +{ + int rc = 0; + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_res = vco->priv; + + pr_debug("rate=%ld\n", clk_hw_get_rate(hw)); + rc = mdss_pll_resource_enable(hdmi_res, true); + if (rc) { + pr_err("Failed to enable mdss HDMI pll resources\n"); + goto error; + } + + if ((hdmi_res->vco_cached_rate != 0) + && (hdmi_res->vco_cached_rate == clk_hw_get_rate(hw))) { + rc = vco->hw.init->ops->set_rate(hw, + hdmi_res->vco_cached_rate, hdmi_res->vco_cached_rate); + if (rc) { + pr_err("index=%d vco_set_rate failed. 
rc=%d\n", + rc, hdmi_res->index); + mdss_pll_resource_enable(hdmi_res, false); + goto error; + } + } + + rc = hdmi_pll_enable_28lpm(hw); + if (rc) { + mdss_pll_resource_enable(hdmi_res, false); + pr_err("ndx=%d failed to enable hdmi pll\n", + hdmi_res->index); + goto error; + } + + mdss_pll_resource_enable(hdmi_res, false); + pr_debug("HDMI PLL enabled\n"); +error: + return rc; +} + +void hdmi_vco_unprepare_28lpm(struct clk_hw *hw) +{ + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_res = vco->priv; + + if (!hdmi_res) { + pr_err("Invalid input parameter\n"); + return; + } + + if (!hdmi_res->pll_on && + mdss_pll_resource_enable(hdmi_res, true)) { + pr_err("pll resource can't be enabled\n"); + return; + } + + hdmi_res->vco_cached_rate = clk_hw_get_rate(hw); + hdmi_pll_disable_28lpm(hw); + + hdmi_res->handoff_resources = false; + mdss_pll_resource_enable(hdmi_res, false); + hdmi_res->pll_on = false; + + pr_debug("HDMI PLL disabled\n"); +} + + +unsigned long hdmi_vco_recalc_rate_28lpm(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk_hw(hw); + struct mdss_pll_resources *hdmi_pll_res = vco->priv; + u64 vco_rate = 0; + + if (!hdmi_pll_res) { + pr_err("dsi pll resources not available\n"); + return 0; + } + + if (hdmi_pll_res->vco_current_rate) { + vco_rate = (unsigned long)hdmi_pll_res->vco_current_rate; + pr_debug("vco_rate=%ld\n", vco_rate); + return vco_rate; + } + + if (is_gdsc_disabled(hdmi_pll_res)) + return 0; + + if (mdss_pll_resource_enable(hdmi_pll_res, true)) { + pr_err("Failed to enable hdmi pll resources\n"); + return 0; + } + + if (hdmi_pll_lock_status(hdmi_pll_res)) { + hdmi_pll_res->handoff_resources = true; + hdmi_pll_res->pll_on = true; + vco_rate = hdmi_vco_get_rate(vco); + } else { + hdmi_pll_res->handoff_resources = false; + mdss_pll_resource_enable(hdmi_pll_res, false); + } + + pr_debug("vco_rate = %ld\n", vco_rate); + + return (unsigned long)vco_rate; +} + 
+static int hdmi_mux_set_parent(void *context, unsigned int reg, + unsigned int mux_sel) +{ + struct mdss_pll_resources *hdmi_pll_res = context; + int rc = 0; + u32 reg_val = 0; + + rc = mdss_pll_resource_enable(hdmi_pll_res, true); + if (rc) { + pr_err("Failed to enable hdmi pll resources\n"); + return rc; + } + + pr_debug("mux_sel = %d\n", mux_sel); + + reg_val = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_REFCLK_CFG); + reg_val &= ~0x70; + reg_val |= (mux_sel & 0x70); + pr_debug("pll_refclk_cfg = 0x%x\n", reg_val); + MDSS_PLL_REG_W(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_REFCLK_CFG, reg_val); + + (void)mdss_pll_resource_enable(hdmi_pll_res, false); + + return 0; +} + +static int hdmi_mux_get_parent(void *context, unsigned int reg, + unsigned int *val) +{ + int rc = 0; + int mux_sel = 0; + struct mdss_pll_resources *hdmi_pll_res = context; + + rc = mdss_pll_resource_enable(hdmi_pll_res, true); + if (rc) { + *val = 0; + pr_err("Failed to enable hdmi pll resources\n"); + } else { + mux_sel = MDSS_PLL_REG_R(hdmi_pll_res->pll_base, + HDMI_PHY_PLL_REFCLK_CFG); + mux_sel &= 0x70; + *val = mux_sel; + pr_debug("mux_sel = %d\n", *val); + } + + (void)mdss_pll_resource_enable(hdmi_pll_res, false); + + return rc; +} + +static struct regmap_config hdmi_pll_28lpm_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x49c, +}; + +static struct regmap_bus hdmi_pclk_src_mux_regmap_ops = { + .reg_write = hdmi_mux_set_parent, + .reg_read = hdmi_mux_get_parent, +}; + +/* Op structures */ +static const struct clk_ops hdmi_28lpm_vco_clk_ops = { + .recalc_rate = hdmi_vco_recalc_rate_28lpm, + .set_rate = hdmi_vco_set_rate_28lpm, + .round_rate = hdmi_vco_round_rate_28lpm, + .prepare = hdmi_vco_prepare_28lpm, + .unprepare = hdmi_vco_unprepare_28lpm, +}; + +static struct hdmi_pll_vco_clk hdmi_vco_clk = { + .min_rate = 540000000, + .max_rate = 1125000000, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_vco_clk", + .parent_names = (const char *[]){ 
"cxo" }, + .num_parents = 1, + .ops = &hdmi_28lpm_vco_clk_ops, + }, +}; + +static struct clk_fixed_factor hdmi_vco_divsel_one_clk_src = { + .div = 1, + .mult = 1, + + .hw.init = &(struct clk_init_data){ + .name = "hdmi_vco_divsel_one_clk_src", + .parent_names = + (const char *[]){ "hdmi_vco_clk" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor hdmi_vco_divsel_two_clk_src = { + .div = 2, + .mult = 1, + + .hw.init = &(struct clk_init_data){ + .name = "hdmi_vco_divsel_two_clk_src", + .parent_names = + (const char *[]){ "hdmi_vco_clk" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor hdmi_vco_divsel_four_clk_src = { + .div = 4, + .mult = 1, + + .hw.init = &(struct clk_init_data){ + .name = "hdmi_vco_divsel_four_clk_src", + .parent_names = + (const char *[]){ "hdmi_vco_clk" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_fixed_factor hdmi_vco_divsel_six_clk_src = { + .div = 6, + .mult = 1, + + .hw.init = &(struct clk_init_data){ + .name = "hdmi_vco_divsel_six_clk_src", + .parent_names = + (const char *[]){ "hdmi_vco_clk" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_regmap_mux hdmi_pclk_src_mux = { + .reg = HDMI_PHY_PLL_REFCLK_CFG, + .shift = 4, + .width = 2, + + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pclk_src_mux", + .parent_names = + (const char *[]){"hdmi_vco_divsel_one_clk_src", + "hdmi_vco_divsel_two_clk_src", + "hdmi_vco_divsel_six_clk_src", + "hdmi_vco_divsel_four_clk_src"}, + .num_parents = 4, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_fixed_factor hdmi_pclk_src = { + .div = 5, + .mult = 1, + + .hw.init = &(struct clk_init_data){ + .name = "hdmi_phy_pll_clk", + .parent_names = + 
(const char *[]){ "hdmi_pclk_src_mux" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_hw *mdss_hdmi_pllcc_28lpm[] = { + [HDMI_VCO_CLK] = &hdmi_vco_clk.hw, + [HDMI_VCO_DIVIDED_1_CLK_SRC] = &hdmi_vco_divsel_one_clk_src.hw, + [HDMI_VCO_DIVIDED_TWO_CLK_SRC] = &hdmi_vco_divsel_two_clk_src.hw, + [HDMI_VCO_DIVIDED_FOUR_CLK_SRC] = &hdmi_vco_divsel_four_clk_src.hw, + [HDMI_VCO_DIVIDED_SIX_CLK_SRC] = &hdmi_vco_divsel_six_clk_src.hw, + [HDMI_PCLK_SRC_MUX] = &hdmi_pclk_src_mux.clkr.hw, + [HDMI_PCLK_SRC] = &hdmi_pclk_src.hw, +}; + +int hdmi_pll_clock_register_28lpm(struct platform_device *pdev, + struct mdss_pll_resources *pll_res) +{ + int rc = -ENOTSUPP, i; + struct clk *clk; + struct clk_onecell_data *clk_data; + int num_clks = ARRAY_SIZE(mdss_hdmi_pllcc_28lpm); + struct regmap *regmap; + + if (!pdev || !pdev->dev.of_node || + !pll_res || !pll_res->pll_base) { + pr_err("Invalid input parameters\n"); + return -EPROBE_DEFER; + } + + clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data), + GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks * + sizeof(struct clk *)), GFP_KERNEL); + if (!clk_data->clks) { + devm_kfree(&pdev->dev, clk_data); + return -ENOMEM; + } + clk_data->clk_num = num_clks; + + /* Set client data for vco, mux and div clocks */ + regmap = devm_regmap_init(&pdev->dev, &hdmi_pclk_src_mux_regmap_ops, + pll_res, &hdmi_pll_28lpm_cfg); + hdmi_pclk_src_mux.clkr.regmap = regmap; + + hdmi_vco_clk.priv = pll_res; + + for (i = HDMI_VCO_CLK; i <= HDMI_PCLK_SRC; i++) { + pr_debug("reg clk: %d index: %d\n", i, pll_res->index); + clk = devm_clk_register(&pdev->dev, + mdss_hdmi_pllcc_28lpm[i]); + if (IS_ERR(clk)) { + pr_err("clk registration failed for HDMI: %d\n", + pll_res->index); + rc = -EINVAL; + goto clk_reg_fail; + } + clk_data->clks[i] = clk; + } + + rc = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, clk_data); + if 
(rc) { + pr_err("%s: Clock register failed rc=%d\n", __func__, rc); + rc = -EPROBE_DEFER; + } else { + pr_debug("%s SUCCESS\n", __func__); + rc = 0; + } + return rc; +clk_reg_fail: + devm_kfree(&pdev->dev, clk_data->clks); + devm_kfree(&pdev->dev, clk_data); + return rc; +} diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll.h b/drivers/clk/qcom/mdss/mdss-hdmi-pll.h index 5c73ed4714c3ef0596485d96409062058fdf4207..75f7fd3619e8ef7583782dc7ff748bec6d7a691e 100644 --- a/drivers/clk/qcom/mdss/mdss-hdmi-pll.h +++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ struct hdmi_pll_cfg { }; struct hdmi_pll_vco_clk { + struct clk_hw hw; unsigned long rate; /* current vco rate */ unsigned long min_rate; /* min vco rate */ unsigned long max_rate; /* max vco rate */ @@ -30,14 +31,16 @@ struct hdmi_pll_vco_clk { struct hdmi_pll_cfg *crctrl; void *priv; - struct clk c; }; -static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk) +static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk_hw(struct clk_hw *hw) { - return container_of(clk, struct hdmi_pll_vco_clk, c); + return container_of(hw, struct hdmi_pll_vco_clk, hw); } +int hdmi_pll_clock_register_28lpm(struct platform_device *pdev, + struct mdss_pll_resources *pll_res); + int hdmi_pll_clock_register(struct platform_device *pdev, struct mdss_pll_resources *pll_res); diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c index 50a2b722400031bf5eb34b573e743d34ccf7f080..d03c4e8ae233cb5c7375e8a989658ba296acb979 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.c +++ b/drivers/clk/qcom/mdss/mdss-pll.c @@ -22,6 +22,7 @@ #include "mdss-pll.h" #include "mdss-dsi-pll.h" #include "mdss-dp-pll.h" +#include 
"mdss-hdmi-pll.h" int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable) { @@ -139,6 +140,8 @@ static int mdss_pll_resource_parse(struct platform_device *pdev, pll_res->pll_interface_type = MDSS_DSI_PLL_14NM; else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_14nm")) pll_res->pll_interface_type = MDSS_DP_PLL_14NM; + else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_28lpm")) + pll_res->pll_interface_type = MDSS_HDMI_PLL_28LPM; else goto err; @@ -182,6 +185,9 @@ static int mdss_pll_clock_register(struct platform_device *pdev, case MDSS_DP_PLL_14NM: rc = dp_pll_clock_register_14nm(pdev, pll_res); break; + case MDSS_HDMI_PLL_28LPM: + rc = hdmi_pll_clock_register_28lpm(pdev, pll_res); + break; case MDSS_UNKNOWN_PLL: default: rc = -EINVAL; @@ -415,6 +421,7 @@ static const struct of_device_id mdss_pll_dt_match[] = { {.compatible = "qcom,mdss_dsi_pll_28lpm"}, {.compatible = "qcom,mdss_dsi_pll_14nm"}, {.compatible = "qcom,mdss_dp_pll_14nm"}, + {.compatible = "qcom,mdss_hdmi_pll_28lpm"}, {} }; diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h index 6bbde04494d5bdc98a63dacaa678d431b5a0c623..683753da97353354dea70b17a565e146603fbcec 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.h +++ b/drivers/clk/qcom/mdss/mdss-pll.h @@ -48,6 +48,7 @@ enum { MDSS_DSI_PLL_28LPM, MDSS_DSI_PLL_14NM, MDSS_DP_PLL_14NM, + MDSS_HDMI_PLL_28LPM, MDSS_UNKNOWN_PLL, }; diff --git a/drivers/clk/qcom/scc-sm8150.c b/drivers/clk/qcom/scc-sm8150.c index 6519d6bd4eb2b673515751c519d7ca2dd44d8f47..c3d5250b492fb8bbe0d97f97825bb7fb80283d27 100644 --- a/drivers/clk/qcom/scc-sm8150.c +++ b/drivers/clk/qcom/scc-sm8150.c @@ -84,7 +84,7 @@ static const struct alpha_pll_config scc_pll_config = { .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000002, .test_ctl_hi1_val = 0x00000000, - .user_ctl_val = 0x00000000, + .user_ctl_val = 0x00000100, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, }; @@ -95,7 +95,7 @@ static const struct 
alpha_pll_config scc_pll_config_sm8150_v2 = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, - .user_ctl_val = 0x00000000, + .user_ctl_val = 0x00000100, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, }; @@ -164,7 +164,6 @@ static struct clk_rcg2 scc_main_rcg_clk_src = { .name = "scc_main_rcg_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -223,7 +222,6 @@ static struct clk_rcg2 scc_qupv3_se0_clk_src = { .name = "scc_qupv3_se0_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -245,7 +243,6 @@ static struct clk_rcg2 scc_qupv3_se1_clk_src = { .name = "scc_qupv3_se1_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -267,7 +264,6 @@ static struct clk_rcg2 scc_qupv3_se2_clk_src = { .name = "scc_qupv3_se2_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -289,7 +285,6 @@ static struct clk_rcg2 scc_qupv3_se3_clk_src = { .name = "scc_qupv3_se3_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -311,7 +306,6 @@ static struct clk_rcg2 scc_qupv3_se4_clk_src = { .name = "scc_qupv3_se4_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, @@ -333,7 +327,6 @@ static struct clk_rcg2 scc_qupv3_se5_clk_src = { .name = "scc_qupv3_se5_clk_src", .parent_names = scc_parent_names_0, .num_parents = 8, - .flags = 
CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, .vdd_class = &vdd_scc_cx, .num_rate_max = VDD_NUM, diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c index 444ae14c100eb4ee73307a3ad0cfe28a5463b76f..435e2b872c12d7c1acbf4e0f4723a8efd4322ca6 100644 --- a/drivers/clk/qcom/videocc-sm8150.c +++ b/drivers/clk/qcom/videocc-sm8150.c @@ -24,8 +24,10 @@ #include #include #include +#include #include +#include #include "common.h" #include "clk-regmap.h" @@ -38,8 +40,40 @@ #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } +#define MSM_BUS_VECTOR(_src, _dst, _ab, _ib) \ +{ \ + .src = _src, \ + .dst = _dst, \ + .ab = _ab, \ + .ib = _ib, \ +} + static DEFINE_VDD_REGULATORS(vdd_mm, VDD_MM_NUM, 1, vdd_corner); +static struct msm_bus_vectors clk_debugfs_vectors[] = { + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_VENUS_CFG, 0, 0), + MSM_BUS_VECTOR(MSM_BUS_MASTER_AMPSS_M0, + MSM_BUS_SLAVE_VENUS_CFG, 0, 1), +}; + +static struct msm_bus_paths clk_debugfs_usecases[] = { + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[0], + }, + { + .num_paths = 1, + .vectors = &clk_debugfs_vectors[1], + } +}; + +static struct msm_bus_scale_pdata clk_debugfs_scale_table = { + .usecase = clk_debugfs_usecases, + .num_usecases = ARRAY_SIZE(clk_debugfs_usecases), + .name = "clk_videocc_debugfs", +}; + enum { P_BI_TCXO, P_CHIP_SLEEP_CLK, @@ -310,6 +344,8 @@ static int video_cc_sm8150_probe(struct platform_device *pdev) struct regmap *regmap; struct clk *clk; int ret; + int i; + unsigned int videocc_bus_id; regmap = qcom_cc_map(pdev, &video_cc_sm8150_desc); if (IS_ERR(regmap)) { @@ -331,6 +367,20 @@ static int video_cc_sm8150_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Unable to get vdd_mm regulator\n"); return PTR_ERR(vdd_mm.regulator[0]); } + vdd_mm.use_max_uV = true; + + videocc_bus_id = + msm_bus_scale_register_client(&clk_debugfs_scale_table); + if (!videocc_bus_id) { + dev_err(&pdev->dev, "Unable to register for bw voting\n"); + return 
-EPROBE_DEFER; + } + + for (i = 0; i < ARRAY_SIZE(video_cc_sm8150_clocks); i++) + if (video_cc_sm8150_clocks[i]) + *(unsigned int *)(void *) + &video_cc_sm8150_clocks[i]->hw.init->bus_cl_id = + videocc_bus_id; ret = video_cc_sm8150_fixup(pdev, regmap); if (ret) diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index 286b0049b7b604e461c8c2d1c85b13addf161686..a48fde191c0ae0e96824311e10bf590d364f1afb 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -223,7 +223,7 @@ static struct ccu_mux cpu_clk = { .hw.init = CLK_HW_INIT_PARENTS("cpu", cpu_parents, &ccu_mux_ops, - CLK_IS_CRITICAL), + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), } }; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index ebbd696f6c709e8e844123198aedade6f9d84bb5..235d21cda429b5acac0cdee0e1fc4cf0feb6a327 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -264,12 +264,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, * * @drv: the cpuidle driver * @dev: the cpuidle device + * @stop_tick: indication on whether or not to stop the tick * * Returns the index of the idle state. The return value must not be negative. + * + * The memory location pointed to by @stop_tick is expected to be written the + * 'false' boolean value if the scheduler tick should not be stopped before + * entering the returned state. 
*/ -int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) +int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick) { - return cpuidle_curr_governor->select(drv, dev); + return cpuidle_curr_governor->select(drv, dev, stop_tick); } /** diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index ce1a2ffffb2a0f88ad9c43d89b34f9f553bc9009..0213e07abe9c2e969349da7cda454de92a316766 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -62,9 +62,10 @@ static inline void ladder_do_selection(struct ladder_device *ldev, * ladder_select_state - selects the next state to enter * @drv: cpuidle driver * @dev: the CPU + * @dummy: not used */ static int ladder_select_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *dummy) { struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); struct ladder_device_state *last_state; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index a99d5620056d8803fe88de7ab272229eb92c6168..58c103b5892b2924976bf3d5cedfeecdbed4e932 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -123,6 +123,7 @@ struct menu_device { int last_state_idx; int needs_update; + int tick_wakeup; unsigned int next_timer_us; unsigned int predicted_us; @@ -284,8 +285,10 @@ static unsigned int get_typical_interval(struct menu_device *data) * menu_select - selects the next idle state to enter * @drv: cpuidle driver containing state data * @dev: the CPU + * @stop_tick: indication on whether or not to stop the tick */ -static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) +static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick) { struct menu_device *data = this_cpu_ptr(&menu_devices); struct device *device = get_cpu_device(dev->cpu); @@ -297,6 +300,7 @@ static int 
menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; int resume_latency = dev_pm_qos_raw_read_value(device); + ktime_t delta_next; if (data->needs_update) { menu_update(drv, dev); @@ -308,11 +312,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) latency_req = resume_latency; /* Special case when user has set very strict latency requirement */ - if (unlikely(latency_req == 0)) + if (unlikely(latency_req == 0)) { + *stop_tick = false; return 0; + } /* determine the expected residency time, round up */ - data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length()); + data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next)); get_iowait_load(&nr_iowaiters, &cpu_load); data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); @@ -351,14 +357,30 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) */ data->predicted_us = min(data->predicted_us, expected_interval); - /* - * Use the performance multiplier and the user-configurable - * latency_req to determine the maximum exit latency. - */ - interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); - if (latency_req > interactivity_req) - latency_req = interactivity_req; + if (tick_nohz_tick_stopped()) { + /* + * If the tick is already stopped, the cost of possible short + * idle duration misprediction is much higher, because the CPU + * may be stuck in a shallow idle state for a long time as a + * result of it. In that case say we might mispredict and try + * to force the CPU into a state for which we would have stopped + * the tick, unless a timer is going to expire really soon + * anyway. 
+ */ + if (data->predicted_us < TICK_USEC) + data->predicted_us = min_t(unsigned int, TICK_USEC, + ktime_to_us(delta_next)); + } else { + /* + * Use the performance multiplier and the user-configurable + * latency_req to determine the maximum exit latency. + */ + interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); + if (latency_req > interactivity_req) + latency_req = interactivity_req; + } + expected_interval = data->predicted_us; /* * Find the idle state with the lowest power while satisfying * our constraints. @@ -374,15 +396,52 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) idx = i; /* first enabled state */ if (s->target_residency > data->predicted_us) break; - if (s->exit_latency > latency_req) + if (s->exit_latency > latency_req) { + /* + * If we break out of the loop for latency reasons, use + * the target residency of the selected state as the + * expected idle duration so that the tick is retained + * as long as that target residency is low enough. + */ + expected_interval = drv->states[idx].target_residency; break; - + } idx = i; } if (idx == -1) idx = 0; /* No states enabled. Must use 0. */ + /* + * Don't stop the tick if the selected state is a polling one or if the + * expected idle duration is shorter than the tick period length. + */ + if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || + expected_interval < TICK_USEC) { + unsigned int delta_next_us = ktime_to_us(delta_next); + + *stop_tick = false; + + if (!tick_nohz_tick_stopped() && idx > 0 && + drv->states[idx].target_residency > delta_next_us) { + /* + * The tick is not going to be stopped and the target + * residency of the state to be returned is not within + * the time until the next timer event including the + * tick, so try to correct that. 
+ */ + for (i = idx - 1; i >= 0; i--) { + if (drv->states[i].disabled || + dev->states_usage[i].disable) + continue; + + idx = i; + if (drv->states[i].target_residency <= delta_next_us) + break; + } + } + } + data->last_state_idx = idx; return data->last_state_idx; @@ -402,6 +461,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index) data->last_state_idx = index; data->needs_update = 1; + data->tick_wakeup = tick_nohz_idle_got_tick(); } /** @@ -432,14 +492,27 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * assume the state was never reached and the exit latency is 0. */ - /* measured value */ - measured_us = cpuidle_get_last_residency(dev); - - /* Deduct exit latency */ - if (measured_us > 2 * target->exit_latency) - measured_us -= target->exit_latency; - else - measured_us /= 2; + if (data->tick_wakeup && data->next_timer_us > TICK_USEC) { + /* + * The nohz code said that there wouldn't be any events within + * the tick boundary (if the tick was stopped), but the idle + * duration predictor had a differing opinion. Since the CPU + * was woken up by a tick (that wasn't stopped after all), the + * predictor was not quite right, so assume that the CPU could + * have been idle long (but not forever) to help the idle + * duration predictor do a better job next time. 
+ */ + measured_us = 9 * MAX_INTERESTING / 10; + } else { + /* measured value */ + measured_us = cpuidle_get_last_residency(dev); + + /* Deduct exit latency */ + if (measured_us > 2 * target->exit_latency) + measured_us -= target->exit_latency; + else + measured_us /= 2; + } /* Make sure our coefficients do not exceed unity */ if (measured_us > data->next_timer_us) diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 42dc0f5b49048b0fa0cb3b2ca5cf6652ea36e329..16679458edb31516e9730a790253879c06a8c7ec 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -339,6 +339,11 @@ static void histtimer_cancel(void) { unsigned int cpu = raw_smp_processor_id(); struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu); + ktime_t time_rem; + + time_rem = hrtimer_get_remaining(cpu_histtimer); + if (ktime_to_us(time_rem) <= 0) + return; hrtimer_try_to_cancel(cpu_histtimer); } @@ -384,11 +389,21 @@ static void clusttimer_cancel(void) { int cpu = raw_smp_processor_id(); struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; + ktime_t time_rem; + + time_rem = hrtimer_get_remaining(&cluster->histtimer); + if (ktime_to_us(time_rem) > 0) + hrtimer_try_to_cancel(&cluster->histtimer); + + if (cluster->parent) { + time_rem = hrtimer_get_remaining( + &cluster->parent->histtimer); - hrtimer_try_to_cancel(&cluster->histtimer); + if (ktime_to_us(time_rem) <= 0) + return; - if (cluster->parent) hrtimer_try_to_cancel(&cluster->parent->histtimer); + } } static enum hrtimer_restart clusttimer_fn(struct hrtimer *h) @@ -596,7 +611,8 @@ static int cpu_power_select(struct cpuidle_device *dev, int best_level = 0; uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, dev->cpu); - s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length()); + ktime_t delta_next; + s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next)); uint32_t modified_time_us = 0; uint32_t next_event_us = 0; int i, idx_restrict; @@ -1051,7 +1067,7 @@ 
static int cluster_configure(struct lpm_cluster *cluster, int idx, * LPMs(XO and Vmin). */ if (!from_idle) - clock_debug_print_enabled(); + clock_debug_print_enabled(true); cpu = get_next_online_cpu(from_idle); cpumask_copy(&cpumask, cpumask_of(cpu)); @@ -1326,7 +1342,7 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle) } static int lpm_cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *stop_tick) { struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu); @@ -1399,11 +1415,11 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev, dev->last_residency = ktime_us_delta(ktime_get(), start); update_history(dev, idx); trace_cpu_idle_exit(idx, success); - local_irq_enable(); if (lpm_prediction && cpu->lpm_prediction) { histtimer_cancel(); clusttimer_cancel(); } + local_irq_enable(); return idx; } diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index ae9a7f280fb6f8df697a55b1aa751e5b69ee233a..0fcd5c3b8729670d0b40a436354bb9872bfa8ca3 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -307,7 +307,7 @@ static int _probe_ce_engine(struct qce_device *pce_dev) pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE; - dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n", + dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n", pce_dev->ce_bam_info.ce_device, pce_dev->iobase, pce_dev->ce_bam_info.dest_pipe_index, pce_dev->ce_bam_info.src_pipe_index, @@ -2443,6 +2443,9 @@ static int _qce_sps_add_sg_data(struct qce_device *pce_dev, struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; + if (!sg_src) + return -ENOENT; + while (nbytes > 0) { len = min(nbytes, sg_dma_len(sg_src)); nbytes -= len; diff --git 
a/drivers/crypto/msm/qcedev_smmu.c b/drivers/crypto/msm/qcedev_smmu.c index 08c4468f0a0b5dca3f56930fbb45d5421566ff99..b75e99ae6ac706971d4887a16d9957a1e20d5686 100644 --- a/drivers/crypto/msm/qcedev_smmu.c +++ b/drivers/crypto/msm/qcedev_smmu.c @@ -73,7 +73,7 @@ static int qcedev_setup_context_bank(struct context_bank_info *cb, pr_info("%s Attached %s and create mapping\n", __func__, dev_name(dev)); pr_info("%s Context Bank name:%s, is_secure:%d, start_addr:%#x\n", __func__, cb->name, cb->is_secure, cb->start_addr); - pr_info("%s size:%#x, dev:%pK, mapping:%pK\n", __func__, cb->size, + pr_debug("%s size:%#x, dev:%pK, mapping:%pK\n", __func__, cb->size, cb->dev, cb->mapping); return rc; diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c index 184f33f0ef18b7306944db556a3c640b1dbb5e3e..409154e770f2cacab8ba0a16e43134dd32da5684 100644 --- a/drivers/devfreq/governor_memlat.c +++ b/drivers/devfreq/governor_memlat.c @@ -323,7 +323,7 @@ static int devfreq_memlat_get_freq(struct devfreq *df, return 0; } -gov_attr(ratio_ceil, 1U, 10000U); +gov_attr(ratio_ceil, 1U, 20000U); gov_attr(stall_floor, 0U, 100U); static struct attribute *memlat_dev_attr[] = { diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index fad0b3fd7dc35561255aca846335e879fa2b4b7d..2cbd87b7fbdbaf78964cb73fb91af4d4773978ae 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -57,7 +57,8 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, stat->total_time * dfso_upthreshold) *freq = max; else if (stat->busy_time * 100 < - stat->total_time * dfso_downdifferential) + stat->total_time * + (dfso_upthreshold - dfso_downdifferential)) *freq = min; else *freq = df->previous_freq; diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index ed3b785bae37e1cd589697f04d1b868dc2fccd9b..dbaa78b3ae89ec6137614c1cfd7eeca675c3a65b 100644 --- a/drivers/dma-buf/Kconfig +++ 
b/drivers/dma-buf/Kconfig @@ -30,4 +30,15 @@ config SW_SYNC WARNING: improper use of this can result in deadlocking kernel drivers from userspace. Intended for test and debug only. +config DEBUG_DMA_BUF_REF + bool "DEBUG Reference Count" + depends on STACKDEPOT + depends on DMA_SHARED_BUFFER + default n + ---help--- + Save stack traces for every call to dma_buf_get and dma_buf_put, to + help debug memory leaks. Potential leaks may be found by manually + matching the get/put call stacks. This feature consumes extra memory + in order to save the stack traces using STACKDEPOT. + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index c33bf88631479411549ab38cb96d748784e3f74c..dcbc33fb4db09cb4fea923ae9f7d54374e46b848 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,4 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_DEBUG_DMA_BUF_REF) += dma-buf-ref.o diff --git a/drivers/dma-buf/dma-buf-ref.c b/drivers/dma-buf/dma-buf-ref.c new file mode 100644 index 0000000000000000000000000000000000000000..76047104faca4a49e46d66202ff048d1a99024f7 --- /dev/null +++ b/drivers/dma-buf/dma-buf-ref.c @@ -0,0 +1,123 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#define DMA_BUF_STACK_DEPTH (16) + +struct dma_buf_ref { + struct list_head list; + depot_stack_handle_t handle; + int count; +}; + +void dma_buf_ref_init(struct dma_buf *dmabuf) +{ + INIT_LIST_HEAD(&dmabuf->refs); +} + +void dma_buf_ref_destroy(struct dma_buf *dmabuf) +{ + struct dma_buf_ref *r, *n; + + mutex_lock(&dmabuf->lock); + list_for_each_entry_safe(r, n, &dmabuf->refs, list) { + list_del(&r->list); + kfree(r); + } + mutex_unlock(&dmabuf->lock); +} + +static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf, + depot_stack_handle_t handle, + int count) +{ + struct dma_buf_ref *r; + + mutex_lock(&dmabuf->lock); + list_for_each_entry(r, &dmabuf->refs, list) { + if (r->handle == handle) { + r->count += count; + goto out; + } + } + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + goto out; + + INIT_LIST_HEAD(&r->list); + r->handle = handle; + r->count = count; + list_add(&r->list, &dmabuf->refs); + +out: + mutex_unlock(&dmabuf->lock); +} + +void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr) +{ + unsigned long entries[DMA_BUF_STACK_DEPTH]; + struct stack_trace trace = { + .nr_entries = 0, + .entries = entries, + .max_entries = DMA_BUF_STACK_DEPTH, + .skip = 1 + }; + depot_stack_handle_t handle; + + save_stack_trace(&trace); + if (trace.nr_entries != 0 && + trace.entries[trace.nr_entries-1] == ULONG_MAX) + trace.nr_entries--; + + handle = depot_save_stack(&trace, GFP_KERNEL); + if (!handle) + return; + + dma_buf_ref_insert_handle(dmabuf, handle, nr); +} + +/** + * Called with dmabuf->lock held + */ +int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf) +{ + char *buf; + struct dma_buf_ref *ref; + int count = 0; + struct stack_trace trace; + + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + + list_for_each_entry(ref, &dmabuf->refs, list) { + count += ref->count; + + seq_printf(s, "References: %d\n", ref->count); + depot_fetch_stack(ref->handle, &trace); + 
snprint_stack_trace(buf, PAGE_SIZE, &trace, 0); + seq_puts(s, buf); + seq_putc(s, '\n'); + } + + seq_printf(s, "Total references: %d\n\n\n", count); + free_page((unsigned long)buf); + + return 0; +} diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index fb05cc57d102634f2a52d832b73f6f8e413906a6..c7eadb3efe6daa1605f4ff9987752e8819ff56d6 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -93,6 +93,8 @@ static int dma_buf_release(struct inode *inode, struct file *file) list_del(&dmabuf->list_node); mutex_unlock(&db_list.lock); + dma_buf_ref_destroy(dmabuf); + if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) reservation_object_fini(dmabuf->resv); @@ -474,7 +476,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; dmabuf->name = bufname; - getnstimeofday(&dmabuf->ctime); + dmabuf->ktime = ktime_get(); if (!resv) { resv = (struct reservation_object *)&dmabuf[1]; @@ -495,6 +497,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) mutex_init(&dmabuf->lock); INIT_LIST_HEAD(&dmabuf->attachments); + dma_buf_ref_init(dmabuf); + dma_buf_ref_mod(dmabuf, 1); + mutex_lock(&db_list.lock); list_add(&dmabuf->list_node, &db_list.head); mutex_unlock(&db_list.lock); @@ -556,6 +561,7 @@ struct dma_buf *dma_buf_get(int fd) fput(file); return ERR_PTR(-EINVAL); } + dma_buf_ref_mod(file->private_data, 1); return file->private_data; } @@ -576,6 +582,7 @@ void dma_buf_put(struct dma_buf *dmabuf) if (WARN_ON(!dmabuf || !dmabuf->file)) return; + dma_buf_ref_mod(dmabuf, -1); fput(dmabuf->file); } EXPORT_SYMBOL_GPL(dma_buf_put); @@ -1266,6 +1273,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "Total %d devices attached\n\n", attach_count); + dma_buf_ref_show(s, buf_obj); + count++; size += buf_obj->size; mutex_unlock(&buf_obj->lock); @@ 
-1326,24 +1335,21 @@ static int get_dma_info(const void *data, struct file *file, unsigned int n) static void write_proc(struct seq_file *s, struct dma_proc *proc) { struct dma_info *tmp; - struct timespec curr_time; - getnstimeofday(&curr_time); seq_printf(s, "\n%s (PID %ld) size: %ld\nDMA Buffers:\n", proc->name, proc->pid, proc->size); seq_printf(s, "%-8s\t%-8s\t%-8s\n", "Name", "Size (KB)", "Time Alive (sec)"); list_for_each_entry(tmp, &proc->dma_bufs, head) { - struct dma_buf *dmabuf; - struct timespec mtime; - __kernel_time_t elap_mtime; + struct dma_buf *dmabuf = tmp->dmabuf; + ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime); - dmabuf = tmp->dmabuf; - mtime = dmabuf->ctime; - elap_mtime = curr_time.tv_sec - mtime.tv_sec; + elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC); seq_printf(s, "%-8s\t%-8ld\t%-8ld\n", - dmabuf->name, dmabuf->size / SZ_1K, elap_mtime); + dmabuf->name, + dmabuf->size / SZ_1K, + elapmstime); } } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index c4e5a0899a4ea988c918334fb0dc0c3fe3b1ebd4..694f2b7bb862f6f1bddcf8ee5f36a78ec6d27634 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -492,6 +492,39 @@ config EDAC_KRYO_ARM64_PANIC_ON_UE For production builds, you should probably say 'N' here. +config EDAC_GIC + depends on ARM_GIC_V3 + tristate "GIC ECC" + help + Supports error detection and correction on the + GIC RAMs. Reports correctable and uncorrectable errors caught by + GIC ECC mechanism. GICT (GIC Trace and debug) register map page + describes the syndrome registers. GIC and GICT support is available + from GIC600 onwards. + For debugging issues having to do with stability and overall system + health, you should probably say 'Y' here. 
+ +config EDAC_GIC_PANIC_ON_CE + depends on EDAC_GIC + bool "Panic on correctable errors - GIC" + help + Forcibly cause a kernel panic if a correctable error (CE) is + detected, even though the error is (by definition) correctable and + would otherwise result in no adverse system effects. This can reduce + debugging times on hardware which may be operating at voltages or + frequencies outside normal specification. + For production builds, you should definitely say 'N' here. + +config EDAC_GIC_PANIC_ON_UE + depends on EDAC_GIC + bool "Panic on un-correctable errors - GIC" + help + Forcibly cause a kernel panic if an uncorrectable error (UE) is + detected. This can reduce debugging times on hardware which may + be operating at voltages or frequencies outside + normal specification. + For production builds, you should definitely say 'N' here. + config EDAC_XGENE tristate "APM X-Gene SoC" depends on (ARM64 || COMPILE_TEST) diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 57bf313997ac1a0ba86ef75604ef003f8e831c70..10a5ca3dc91a641840faa4d289df49026b8578b1 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_EDAC_X38) += x38_edac.o obj-$(CONFIG_EDAC_I82860) += i82860_edac.o obj-$(CONFIG_EDAC_R82600) += r82600_edac.o obj-$(CONFIG_EDAC_KRYO_ARM64) += kryo_arm64_edac.o +obj-$(CONFIG_EDAC_GIC) += gic600_edac.o amd64_edac_mod-y := amd64_edac.o amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o diff --git a/drivers/edac/gic600_edac.c b/drivers/edac/gic600_edac.c new file mode 100644 index 0000000000000000000000000000000000000000..ee86771ae00e715926925919f565f4163be7948e --- /dev/null +++ b/drivers/edac/gic600_edac.c @@ -0,0 +1,448 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "GICT: %s(): " fmt, __func__ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "edac_mc.h" +#include "edac_device.h" + +#ifdef CONFIG_EDAC_GIC_PANIC_ON_CE +#define GICT_PANIC_ON_CE 1 +#else +#define GICT_PANIC_ON_CE 0 +#endif + +#ifdef CONFIG_EDAC_GIC_PANIC_ON_UE +#define GICT_PANIC_ON_UE 1 +#else +#define GICT_PANIC_ON_UE 0 +#endif + +#define EDAC_NODE "GICT" +#define RECORD_WIDTH(n) (64 * n) +#define GICT_ERR_FR(n) (RECORD_WIDTH(n) + 0x00) +#define GICT_ERR_CTRL(n) (RECORD_WIDTH(n) + 0x08) +#define GICT_ERR_STATUS(n) (RECORD_WIDTH(n) + 0x10) +#define GICT_ERR_ADDR(n) (RECORD_WIDTH(n) + 0x18) +#define GICT_ERR_MISC0(n) (RECORD_WIDTH(n) + 0x20) +#define GICT_ERR_MISC1(n) (RECORD_WIDTH(n) + 0x28) +#define GICT_ERRGSR 0xE000 +#define GICT_ERRDEVARCH 0xFFBC +#define GICT_ERRIDR 0xFFC8 +#define GICT_ERRIRQCR0 0xE800 +#define GICT_ERRIRQCR1 (GICT_ERRIRQCR0+0x8) + +#define GICD_FCTLR 0x0020 +#define GICR_FCTLR 0x0020 +#define GICR_OFFSET 0x20000 + + +#define RECORD0 "Software error in GICD programming(UEO)" +#define RECORD1 "Correctable SPI RAM errors(CE)" +#define RECORD2 "Uncorrectable SPI RAM errors(UER)" +#define RECORD3 "Correctable SGI RAM errors(CE)" +#define RECORD4 "Uncorrectable SGI RAM errors(UER)" +#define RECORD5 "Correctable TGT cache errors(CE)" +#define RECORD6 "Correctable TGT cache errors(UER)" +#define RECORD7 "Correctable PPI RAM errors(CE)" +#define RECORD8 "Uncorrectable PPI RAM errors(UER)" +#define RECORD9 "Correctable LPI RAM errors(CE)" +#define RECORD10 "Uncorrectable LPI RAM errors(UER)" +#define RECORD11 "Correctable ITS RAM errors(CE)" +#define RECORD12 "Uncorrectable ITS RAM errors(UEO)" +#define RECORD13 
"Uncorrectable 2xITS RAM errors(UER)" +#define RECORD14 "Uncorrectable 2xITS RAM errors(UER)" +#define RECORD(N) RECORD##N + +#define ERR_FR_UI_MASK GENMASK(5, 4) +#define ERR_FR_FI_MASK GENMASK(7, 6) +#define ERR_FR_UE_MASK GENMASK(9, 8) +#define ERR_FR_CFI_MASK GENMASK(11, 10) + +#define ERR_CTRL_UI_MASK BIT(2) +#define ERR_CTRL_FI_MASK BIT(3) +#define ERR_CTRL_UE_MASK BIT(4) +#define ERR_CTRL_CFI_MASK BIT(8) + +#define ERR_STATUS_SERR_MASK GENMASK(7, 0) +#define ERR_STATUS_IERR_MASK GENMASK(15, 8) +#define ERR_STATUS_UET_MASK GENMASK(21, 20) +#define ERR_STATUS_CE_MASK GENMASK(25, 24) +#define ERR_STATUS_MV_MASK BIT(26) +#define ERR_STATUS_OF_MASK BIT(27) +#define ERR_STATUS_ER_MASK BIT(28) +#define ERR_STATUS_UE_MASK BIT(29) +#define ERR_STATUS_V_MASK BIT(30) +#define ERR_STATUS_AV_MASK BIT(31) + +#define ERR_MISC0_COUNT_SHIFT 32 +#define ERR_MISC0_COUNT_MASK GENMASK(39, 32) +#define FAULT_THRESHOLD 0xFF + +#define ERR_IRQCR_SPIID_MASK GENMASK(9, 0) + +#define ERR_ADDR_PADDR_MASK GENMASK(47, 0) +#define ERR_ADDR_NS_MASK BIT(63) + +struct errors_edac { + const char *const msg; + void (*func)(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg); +}; +#define GICT_IRQS 2 +#define GICT_FAULT_IRQ_IDX 0 +#define GICT_ERROR_IRQ_IDX 1 +#define GICT_BUFFER_SIZE 32 +static u32 gict_buffer[GICT_BUFFER_SIZE] __aligned(PAGE_SIZE); + +static void gict_spi_recovery(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg); + +static const struct errors_edac errors[] = { + {RECORD(0), edac_device_handle_ue }, + {RECORD(1), edac_device_handle_ce }, + {RECORD(2), gict_spi_recovery }, + {RECORD(3), edac_device_handle_ce }, + {RECORD(4), edac_device_handle_ue }, + {RECORD(5), edac_device_handle_ce }, + {RECORD(6), edac_device_handle_ce }, + {RECORD(7), edac_device_handle_ce }, + {RECORD(8), edac_device_handle_ue }, + {RECORD(9), edac_device_handle_ce }, + {RECORD(10), edac_device_handle_ue }, + {RECORD(11), 
edac_device_handle_ce }, + {RECORD(12), edac_device_handle_ue }, + {RECORD(13), edac_device_handle_ue }, + {RECORD(14), edac_device_handle_ue }, +}; + +struct erp_drvdata { + struct edac_device_ctl_info *edev_ctl; + void __iomem *base; + u32 interrupt_config[GICT_IRQS]; + u32 max_records; + struct mutex mutex; +}; +static struct erp_drvdata *gict_edac; + +static inline void gict_write(struct erp_drvdata *drv, u32 val, u32 off) +{ + writel_relaxed(val, drv->base + off); +} + +static inline u32 gict_read(struct erp_drvdata *drv, u32 off) +{ + return readl_relaxed(drv->base + off); +} + +static inline u64 gict_readq(struct erp_drvdata *drv, u32 off) +{ + return readq_relaxed(drv->base + off); +} + +static inline void gict_writeq(struct erp_drvdata *drv, u64 val, u32 off) +{ + writeq_relaxed(val, drv->base + off); +} + +#define for_each_bit(i, buf, max) \ + for (i = find_first_bit((unsigned long *)buf, max); \ + i < max; \ + i = find_next_bit((unsigned long *)buf, max, i + 1)) + +#define SCM_SVC_GIC 0x1D +#define GIC_ERROR_RECOVERY_SMC_ID 0x01 +#define MAX_IRQS 1023 +/* RECORD(2) - Recovery of GIC SPI interrupts */ +static void gict_spi_recovery(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg) +{ + struct scm_desc desc; + u64 i; + int ret; + + /* + * SPIs that are in the error state can be determined + * by reading the GICD_IERRRn register. + * Secure side reads GICD_IERRRn register into gict_buffer. 
+ */ + memset(&gict_buffer, 0, sizeof(gict_buffer)); + desc.args[0] = virt_to_phys(gict_buffer); + desc.args[1] = sizeof(gict_buffer); + desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL); + + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_GIC, + GIC_ERROR_RECOVERY_SMC_ID), &desc); + + if (ret) { + pr_warn("Recovery SCM call failed\n"); + return; + } + + for_each_bit(i, gict_buffer, MAX_IRQS) + pr_warn("GICT: Corrupted SPI:%d", i); +} + +static inline void gict_dump_err_record(int record, u64 errxstatus, + u64 errxmisc0, u64 errxmisc1, u64 errxaddr) +{ + edac_printk(KERN_INFO, EDAC_NODE, "RECORD: %d", record); + edac_printk(KERN_INFO, EDAC_NODE, "%s\n", errors[record].msg); + edac_printk(KERN_INFO, EDAC_NODE, "ERRXSTATUS: %llx\n", errxstatus); + edac_printk(KERN_INFO, EDAC_NODE, "ERRXMISC0: %llx\n", errxmisc0); + edac_printk(KERN_INFO, EDAC_NODE, "ERRXMISC1: %llx\n", errxmisc1); + edac_printk(KERN_INFO, EDAC_NODE, "ERRXADDR: %llx\n", errxaddr); +} + +static void handle_record(int record, int level, + struct edac_device_ctl_info *edev_ctl) +{ + errors[record].func(edev_ctl, raw_smp_processor_id(), + level, errors[record].msg); +} + +static void gict_check_error_records(struct erp_drvdata *drv) +{ + u64 errxstatus, errxaddr, errxmisc0, errxmisc1, errgsr, i; + + errgsr = gict_readq(drv, GICT_ERRGSR); + /* + * Lets first dump all error records + * details which are having a valid status. 
+ */ + for_each_bit(i, &errgsr, drv->max_records) { + errxstatus = gict_readq(drv, GICT_ERR_STATUS(i)); + if (errxstatus & ERR_STATUS_V_MASK) { + errxaddr = gict_readq(drv, GICT_ERR_ADDR(i)); + errxmisc0 = gict_readq(drv, GICT_ERR_MISC0(i)); + errxmisc1 = gict_readq(drv, GICT_ERR_MISC1(i)); + gict_dump_err_record(i, errxstatus, + errxmisc0, errxmisc1, errxaddr); + } + } +} + +static void gict_handle_records(struct erp_drvdata *drv) +{ + u64 errxstatus, errgsr, errxstatus_ack, i; + + errgsr = gict_readq(drv, GICT_ERRGSR); + + for_each_bit(i, &errgsr, drv->max_records) { + errxstatus = gict_readq(drv, GICT_ERR_STATUS(i)); + errxstatus_ack = 0; + if (errxstatus & ERR_STATUS_V_MASK) { + handle_record(i, 0, drv->edev_ctl); + errxstatus_ack = ERR_STATUS_CE_MASK | + ERR_STATUS_UE_MASK | ERR_STATUS_UET_MASK | + ERR_STATUS_OF_MASK | ERR_STATUS_MV_MASK | + ERR_STATUS_AV_MASK | ERR_STATUS_V_MASK; + gict_write(drv, errxstatus_ack, GICT_ERR_STATUS(i)); + } + + } +} + +static void configure_thresholds(struct erp_drvdata *drv) +{ + u64 errxmisc0, i; + + /* + * Configure ERRXMISC0 count to 0xFF so that fault + * interrupt raised on every error observed. + */ + for (i = 0; i <= drv->max_records; i++) { + errxmisc0 = gict_readq(drv, GICT_ERR_MISC0(i)); + errxmisc0 |= (u64)FAULT_THRESHOLD << ERR_MISC0_COUNT_SHIFT; + gict_writeq(drv, errxmisc0, GICT_ERR_MISC0(i)); + } + +} + +static irqreturn_t gict_fault_handler(int irq, void *drvdata) +{ + struct erp_drvdata *drv = drvdata; + + mutex_lock(&drv->mutex); + gict_check_error_records(drv); + gict_handle_records(drv); + configure_thresholds(drv); + mutex_unlock(&drv->mutex); + return IRQ_HANDLED; +} + +static void configure_ctrl_registers(struct erp_drvdata *drv) +{ + u64 errxfr, errxctrl, i; + + configure_thresholds(drv); + + /* Configure Control Register for CFI, UE, FI and UI faults/errors. 
*/ + for (i = 0; i <= drv->max_records; i++) { + errxfr = gict_readq(drv, GICT_ERR_FR(i)); + errxctrl = gict_readq(drv, GICT_ERR_CTRL(i)); + /* Configure to enable Correctable Fault Interrupt(CFI) */ + if (errxfr & ERR_FR_CFI_MASK) + errxctrl = errxctrl | ERR_CTRL_CFI_MASK; + /* Configure to enable Uncorrectable Error(UE) */ + if (errxfr & ERR_FR_UE_MASK) + errxctrl = errxctrl | ERR_CTRL_UE_MASK; + /* Configure to enable Uncorrectable Fault Interrupt(FI) */ + if (errxfr & ERR_FR_FI_MASK) + errxctrl = errxctrl | ERR_CTRL_FI_MASK; + /* Configure to enable Uncorrectable Interrupt(UI) */ + if (errxfr & ERR_FR_UI_MASK) + errxctrl = errxctrl | ERR_CTRL_UI_MASK; + + gict_writeq(drv, errxctrl, GICT_ERR_CTRL(i)); + } +} + +static int gic_erp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct erp_drvdata *drv; + struct resource *res; + int ret, errirq, faultirq; + u64 errirqcrx; + const char *err_irqname = "gict-err"; + const char *fault_irqname = "gict-fault"; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gict-base"); + if (!res) + return -ENXIO; + + drv->base = devm_ioremap_resource(dev, res); + if (IS_ERR_OR_NULL(drv->base)) + return -EBUSY; + + ret = of_property_read_u32_array(dev->of_node, + "interrupt-config", drv->interrupt_config, GICT_IRQS); + if (ret) + return -ENXIO; + + errirq = platform_get_irq_byname(pdev, err_irqname); + if (errirq < 0) + return errirq; + + faultirq = platform_get_irq_byname(pdev, fault_irqname); + if (faultirq < 0) + return faultirq; + + drv->edev_ctl = edac_device_alloc_ctl_info(0, "gic", + 1, "T", 1, 1, NULL, 0, + edac_device_alloc_index()); + + if (!drv->edev_ctl) + return -ENOMEM; + + drv->edev_ctl->dev = dev; + drv->edev_ctl->mod_name = dev_name(dev); + drv->edev_ctl->dev_name = dev_name(dev); + drv->edev_ctl->ctl_name = "GICT"; + drv->edev_ctl->panic_on_ce = GICT_PANIC_ON_CE; + drv->edev_ctl->panic_on_ue = 
GICT_PANIC_ON_UE; + platform_set_drvdata(pdev, drv); + gict_edac = drv; + + mutex_init(&drv->mutex); + ret = edac_device_add_device(drv->edev_ctl); + if (ret) + goto out_mem; + + /* + * Find no of error records supported by GICT from ERRIDR register + * and configure control registers for all records supported. + */ + drv->max_records = gict_read(drv, GICT_ERRIDR); + configure_ctrl_registers(drv); + + /* Configure GICT_ERRIRQCR0 register with fault_interrupt number */ + errirqcrx = gict_readq(drv, GICT_ERRIRQCR0); + errirqcrx |= (drv->interrupt_config[GICT_FAULT_IRQ_IDX] + & ERR_IRQCR_SPIID_MASK); + gict_writeq(drv, errirqcrx, GICT_ERRIRQCR0); + + /* Configure GICT_ERRIRQCR1 register with err_interrupt number */ + errirqcrx = gict_readq(drv, GICT_ERRIRQCR1); + errirqcrx |= (drv->interrupt_config[GICT_ERROR_IRQ_IDX] + & ERR_IRQCR_SPIID_MASK); + gict_writeq(drv, errirqcrx, GICT_ERRIRQCR1); + + ret = devm_request_threaded_irq(&pdev->dev, faultirq, + NULL, gict_fault_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, fault_irqname, drv); + if (ret) { + dev_err(dev, "Failed to request %s IRQ %d: %d\n", + fault_irqname, res->start, ret); + goto out_dev; + } + + ret = devm_request_threaded_irq(&pdev->dev, errirq, + NULL, gict_fault_handler, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, err_irqname, drv); + if (ret) { + dev_err(dev, "Failed to request %s IRQ %d: %d\n", + err_irqname, res->start, ret); + goto out_dev; + } + return ret; + +out_dev: + edac_device_del_device(dev); +out_mem: + edac_device_free_ctl_info(drv->edev_ctl); + return ret; +} + +static int gic_erp_remove(struct platform_device *pdev) +{ + struct erp_drvdata *drv = dev_get_drvdata(&pdev->dev); + struct edac_device_ctl_info *edac_ctl = drv->edev_ctl; + + edac_device_del_device(edac_ctl->dev); + edac_device_free_ctl_info(edac_ctl); + + return 0; +} + +static const struct of_device_id gic_edac_match[] = { + { .compatible = "arm,gic-600-erp", }, + { } +}; +MODULE_DEVICE_TABLE(of, gic_edac_match); + +static struct 
platform_driver gic_edac_driver = { + .probe = gic_erp_probe, + .remove = gic_erp_remove, + .driver = { + .name = "gic_erp", + .of_match_table = of_match_ptr(gic_edac_match), + }, +}; +module_platform_driver(gic_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("GICT driver"); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 0e2011636fbbb4af579a895227fe6b9fef8e27cf..c53c7ac992f86fbcfa69f425e031679e3a3d5840 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev) { pr_debug("fw_cfg: unloading.\n"); fw_cfg_sysfs_cache_cleanup(); + sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr); + fw_cfg_io_cleanup(); fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset); fw_cfg_kobj_cleanup(fw_cfg_sel_ko); - fw_cfg_io_cleanup(); return 0; } diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 3bf65288ffffd51719d0c4e8ce934ccd2d2f59e3..2fdf302ebdad096e39f1bf2e853313ffe14a0caa 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -62,6 +62,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index a2780aff5b3110edfc11bb9aa6f54adb8fde73e2..190fa02b0550582213d37c67b602f854d7f2d9c6 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ #include +#include #include "dp_power.h" #include "dp_catalog.h" @@ -35,6 +36,8 @@ struct dp_debug_private { u8 *dpcd; u32 dpcd_size; + u32 mst_con_id; + char exe_mode[SZ_32]; char reg_dump[SZ_32]; @@ -149,7 +152,7 @@ static ssize_t dp_debug_write_edid(struct file *file, goto bail; } - if (edid_buf_index < debug->edid_size) + if (debug->edid && (edid_buf_index < debug->edid_size)) debug->edid[edid_buf_index++] = d; buf_t += 
char_to_nib; @@ -364,6 +367,112 @@ static ssize_t dp_debug_write_edid_modes(struct file *file, return len; } +static ssize_t dp_debug_write_edid_modes_mst(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char buf[SZ_32]; + char *read_buf; + size_t len = 0; + + int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio = 0; + int con_id = 0, offset = 0, debug_en = 0; + bool in_list = false; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + read_buf = buf; + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + while (sscanf(read_buf, "%d %d %d %d %d %d%n", &debug_en, &con_id, + &hdisplay, &vdisplay, &vrefresh, &aspect_ratio, + &offset) == 6) { + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, + list) { + if (mst_connector->con_id == con_id) { + in_list = true; + mst_connector->debug_en = (bool) debug_en; + mst_connector->hdisplay = hdisplay; + mst_connector->vdisplay = vdisplay; + mst_connector->vrefresh = vrefresh; + mst_connector->aspect_ratio = aspect_ratio; + } + } + + if (!in_list) + pr_debug("dp connector id %d is invalid\n", con_id); + + in_list = false; + read_buf += offset; + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); +end: + return len; +} + +static ssize_t dp_debug_write_mst_con_id(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char buf[SZ_32]; + size_t len = 0; + int con_id = 0; + bool in_list = false; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto clear; + + 
buf[len] = '\0'; + + if (kstrtoint(buf, 10, &con_id) != 0) + goto clear; + + if (!con_id) + goto clear; + + /* Verify that the connector id is for a valid mst connector. */ + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == con_id) { + in_list = true; + debug->mst_con_id = con_id; + break; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) + pr_err("invalid connector id %u\n", con_id); + + goto end; +clear: + pr_debug("clearing mst_con_id\n"); + debug->mst_con_id = 0; +end: + return len; +} + static ssize_t dp_debug_bw_code_write(struct file *file, const char __user *user_buff, size_t count, loff_t *ppos) { @@ -733,6 +842,183 @@ static ssize_t dp_debug_read_edid_modes(struct file *file, return rc; } +static ssize_t dp_debug_read_edid_modes_mst(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + struct drm_connector *connector; + struct drm_display_mode *mode; + bool in_list = false; + + if (!debug) { + pr_err("invalid data\n"); + rc = -ENODEV; + goto error; + } + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + if (mst_connector->con_id == debug->mst_con_id) { + connector = mst_connector->conn; + in_list = true; + } + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (!in_list) { + pr_err("connector %u not in mst list\n", debug->mst_con_id); + rc = -EINVAL; + goto error; + } + + if (!connector) { + pr_err("connector is NULL\n"); + rc = -EINVAL; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + 
mutex_lock(&connector->dev->mode_config.mutex); + list_for_each_entry(mode, &connector->modes, head) { + ret = snprintf(buf + len, max_size, + "%s %d %d %d %d %d 0x%x\n", + mode->name, mode->vrefresh, + mode->picture_aspect_ratio, mode->htotal, + mode->vtotal, mode->clock, mode->flags); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&connector->dev->mode_config.mutex); + + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_mst_con_id(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + + if (!debug) { + pr_err("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + ret = snprintf(buf, max_size, "%u\n", debug->mst_con_id); + len += ret; + + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_mst_conn_info(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_mst_connector *mst_connector; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + struct drm_connector *connector; + + if (!debug) { + pr_err("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + mutex_lock(&debug->dp_debug.dp_mst_connector_list.lock); + list_for_each_entry(mst_connector, + &debug->dp_debug.dp_mst_connector_list.list, list) { + /* Do not print info for head node */ + if (mst_connector->con_id == 
-1) + continue; + + connector = mst_connector->conn; + + if (!connector) { + pr_err("connector for id %d is NULL\n", + mst_connector->con_id); + continue; + } + + ret = snprintf(buf + len, max_size, + "conn name:%s, conn id:%d\n", connector->name, + connector->base.id); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&debug->dp_debug.dp_mst_connector_list.lock); + + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, size_t count, loff_t *ppos) { @@ -1229,6 +1515,23 @@ static const struct file_operations edid_modes_fops = { .write = dp_debug_write_edid_modes, }; +static const struct file_operations edid_modes_mst_fops = { + .open = simple_open, + .read = dp_debug_read_edid_modes_mst, + .write = dp_debug_write_edid_modes_mst, +}; + +static const struct file_operations mst_conn_info_fops = { + .open = simple_open, + .read = dp_debug_read_mst_conn_info, +}; + +static const struct file_operations mst_con_id_fops = { + .open = simple_open, + .read = dp_debug_read_mst_con_id, + .write = dp_debug_write_mst_con_id, +}; + static const struct file_operations hpd_fops = { .open = simple_open, .write = dp_debug_write_hpd, @@ -1348,6 +1651,33 @@ static int dp_debug_init(struct dp_debug *dp_debug) goto error_remove_dir; } + file = debugfs_create_file("edid_modes_mst", 0644, dir, + debug, &edid_modes_mst_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + pr_err("[%s] debugfs create edid_modes_mst failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_id", 0644, dir, + debug, &mst_con_id_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + pr_err("[%s] debugfs create mst_con_id failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_con_info", 
0644, dir, + debug, &mst_conn_info_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + pr_err("[%s] debugfs create mst_conn_info failed, rc=%d\n", + DEBUG_NAME, rc); + goto error_remove_dir; + } + file = debugfs_create_file("hpd", 0644, dir, debug, &hpd_fops); if (IS_ERR_OR_NULL(file)) { @@ -1557,6 +1887,16 @@ struct dp_debug *dp_debug_get(struct dp_debug_in *in) dp_debug->get_edid = dp_debug_get_edid; + INIT_LIST_HEAD(&dp_debug->dp_mst_connector_list.list); + + /* + * Do not associate the head of the list with any connector in order to + * maintain backwards compatibility with the SST use case. + */ + dp_debug->dp_mst_connector_list.con_id = -1; + dp_debug->dp_mst_connector_list.conn = NULL; + dp_debug->dp_mst_connector_list.debug_en = false; + return dp_debug; error: return ERR_PTR(rc); diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index f14b9749bb8e3672009098ff20d457c1d5515845..340f01546ae837793ef32796aef591505c26533d 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -19,6 +19,7 @@ #include "dp_link.h" #include "dp_usbpd.h" #include "dp_aux.h" +#include "dp_display.h" /** * struct dp_debug @@ -43,6 +44,7 @@ struct dp_debug { u32 max_pclk_khz; bool force_encryption; char hdcp_status[SZ_128]; + struct dp_mst_connector dp_mst_connector_list; u8 *(*get_edid)(struct dp_debug *dp_debug); }; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index f2c884e93c8c1d391dfb955e908633a92dff01bf..5bcbfd5a3bb709c45b90d3c84aee26ca6a485c8f 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -231,6 +231,10 @@ static void dp_display_hdcp_cb_work(struct work_struct *work) u32 hdcp_auth_state; dp = container_of(dw, struct dp_display_private, hdcp_cb_work); + + if (!dp->power_on || atomic_read(&dp->aborted)) + return; + status = &dp->link->hdcp_status; if (status->hdcp_state == HDCP_STATE_INACTIVE) { @@ -294,8 +298,10 @@ 
static void dp_display_notify_hdcp_status_cb(void *ptr, dp->link->hdcp_status.hdcp_state = state; - if (dp->is_connected) + mutex_lock(&dp->session_lock); + if (dp->power_on && !atomic_read(&dp->aborted)) queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4); + mutex_unlock(&dp->session_lock); } static void dp_display_check_source_hdcp_caps(struct dp_display_private *dp) @@ -1644,6 +1650,9 @@ static enum drm_mode_status dp_display_validate_mode( struct dp_panel *dp_panel; struct dp_debug *debug; enum drm_mode_status mode_status = MODE_BAD; + bool in_list = false; + struct dp_mst_connector *mst_connector; + int hdis, vdis, vref, ar, _hdis, _vdis, _vref, _ar; if (!dp_display || !mode || !panel) { pr_err("invalid params\n"); @@ -1687,6 +1696,58 @@ static enum drm_mode_status dp_display_validate_mode( goto end; } + /* + * If the connector exists in the mst connector list and if debug is + * enabled for that connector, use the mst connector settings from the + * list for validation. Otherwise, use non-mst default settings. 
+ */ + mutex_lock(&debug->dp_mst_connector_list.lock); + + if (list_empty(&debug->dp_mst_connector_list.list)) { + mutex_unlock(&debug->dp_mst_connector_list.lock); + goto verify_default; + } + + list_for_each_entry(mst_connector, &debug->dp_mst_connector_list.list, + list) { + if (mst_connector->con_id == dp_panel->connector->base.id) { + in_list = true; + + if (!mst_connector->debug_en) { + mode_status = MODE_OK; + mutex_unlock( + &debug->dp_mst_connector_list.lock); + goto end; + } + + hdis = mst_connector->hdisplay; + vdis = mst_connector->vdisplay; + vref = mst_connector->vrefresh; + ar = mst_connector->aspect_ratio; + + _hdis = mode->hdisplay; + _vdis = mode->vdisplay; + _vref = mode->vrefresh; + _ar = mode->picture_aspect_ratio; + + if (hdis == _hdis && vdis == _vdis && vref == _vref && + ar == _ar) { + mode_status = MODE_OK; + mutex_unlock( + &debug->dp_mst_connector_list.lock); + goto end; + } + + break; + } + } + + mutex_unlock(&debug->dp_mst_connector_list.lock); + + if (in_list) + goto end; + +verify_default: if (debug->debug_en && (mode->hdisplay != debug->hdisplay || mode->vdisplay != debug->vdisplay || mode->vrefresh != debug->vrefresh || @@ -1884,6 +1945,7 @@ static int dp_display_mst_connector_install(struct dp_display *dp_display, struct dp_panel_in panel_in; struct dp_panel *dp_panel; struct dp_display_private *dp; + struct dp_mst_connector *mst_connector; if (!dp_display || !connector) { pr_err("invalid input\n"); @@ -1892,8 +1954,11 @@ static int dp_display_mst_connector_install(struct dp_display *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); + mutex_lock(&dp->session_lock); + if (!dp->mst.drm_registered) { pr_debug("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); return -EPERM; } @@ -1908,6 +1973,7 @@ static int dp_display_mst_connector_install(struct dp_display *dp_display, if (IS_ERR(dp_panel)) { rc = PTR_ERR(dp_panel); pr_err("failed to initialize panel, rc = %d\n", rc); + 
mutex_unlock(&dp->session_lock); return rc; } @@ -1916,12 +1982,28 @@ static int dp_display_mst_connector_install(struct dp_display *dp_display, rc = PTR_ERR(dp_panel->audio); pr_err("[mst] failed to initialize audio, rc = %d\n", rc); dp_panel->audio = NULL; + mutex_unlock(&dp->session_lock); return rc; } DP_MST_DEBUG("dp mst connector installed. conn:%d\n", connector->base.id); + mutex_lock(&dp->debug->dp_mst_connector_list.lock); + + mst_connector = kmalloc(sizeof(struct dp_mst_connector), + GFP_KERNEL); + mst_connector->debug_en = false; + mst_connector->conn = connector; + mst_connector->con_id = connector->base.id; + INIT_LIST_HEAD(&mst_connector->list); + + list_add(&mst_connector->list, + &dp->debug->dp_mst_connector_list.list); + + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + return 0; } @@ -1932,6 +2014,7 @@ static int dp_display_mst_connector_uninstall(struct dp_display *dp_display, struct sde_connector *sde_conn; struct dp_panel *dp_panel; struct dp_display_private *dp; + struct dp_mst_connector *con_to_remove, *temp_con; if (!dp_display || !connector) { pr_err("invalid input\n"); @@ -1940,14 +2023,18 @@ static int dp_display_mst_connector_uninstall(struct dp_display *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); + mutex_lock(&dp->session_lock); + if (!dp->mst.drm_registered) { pr_debug("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); return -EPERM; } sde_conn = to_sde_connector(connector); if (!sde_conn->drv_panel) { pr_err("invalid panel for connector:%d\n", connector->base.id); + mutex_unlock(&dp->session_lock); return -EINVAL; } @@ -1958,6 +2045,19 @@ static int dp_display_mst_connector_uninstall(struct dp_display *dp_display, DP_MST_DEBUG("dp mst connector uninstalled. 
conn:%d\n", connector->base.id); + mutex_lock(&dp->debug->dp_mst_connector_list.lock); + + list_for_each_entry_safe(con_to_remove, temp_con, + &dp->debug->dp_mst_connector_list.list, list) { + if (con_to_remove->con_id == connector->base.id) { + list_del(&con_to_remove->list); + kfree(con_to_remove); + } + } + + mutex_unlock(&dp->debug->dp_mst_connector_list.lock); + mutex_unlock(&dp->session_lock); + return rc; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 0c293a70d560a5443ada0c2db4a1c859a4796e54..512778c747950199c66c2d5fba977cdbe9853d83 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -15,6 +15,7 @@ #ifndef _DP_DISPLAY_H_ #define _DP_DISPLAY_H_ +#include #include #include @@ -43,6 +44,18 @@ struct dp_mst_caps { struct drm_dp_aux *drm_aux; }; +struct dp_mst_connector { + bool debug_en; + int con_id; + int hdisplay; + int vdisplay; + int vrefresh; + int aspect_ratio; + struct drm_connector *conn; + struct mutex lock; + struct list_head list; +}; + struct dp_display { struct drm_device *drm_dev; struct dp_bridge *bridge; diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c index f40a814b8956747067ef8b87949c2c6b17ab8365..133d4a98fdecafda0e6ac00eb88216762b323379 100644 --- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c @@ -1112,7 +1112,7 @@ static int dp_mst_connector_atomic_check(struct drm_connector *connector, void *display, struct drm_connector_state *new_conn_state) { int rc = 0, slots, i; - struct drm_atomic_state *state = new_conn_state->state; + struct drm_atomic_state *state; struct drm_connector_state *old_conn_state; struct drm_crtc *old_crtc; struct drm_crtc_state *crtc_state; @@ -1124,6 +1124,11 @@ static int dp_mst_connector_atomic_check(struct drm_connector *connector, DP_MST_DEBUG("enter:\n"); + if (!new_conn_state) + return rc; + + state = new_conn_state->state; + old_conn_state = 
drm_atomic_get_old_connector_state(state, connector); if (!old_conn_state) @@ -1160,7 +1165,7 @@ static int dp_mst_connector_atomic_check(struct drm_connector *connector, } mode_set: - if (!new_conn_state || !new_conn_state->crtc) + if (!new_conn_state->crtc) return rc; crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c index c5c2a0c1bc6f270a34d534fa7c38e0cab6328861..11d72b1ead63b79f5f1c36b2fcade4410097f1f6 100644 --- a/drivers/gpu/drm/msm/dp/dp_usbpd.c +++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c @@ -467,13 +467,13 @@ static int dp_usbpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) struct dp_usbpd *dp_usbpd; struct dp_usbpd_private *pd; + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); if (!dp_usbpd) { pr_err("invalid input\n"); rc = -EINVAL; goto error; } - dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); pd->vdo = vdo; @@ -546,10 +546,10 @@ void dp_usbpd_put(struct dp_hpd *dp_hpd) struct dp_usbpd *dp_usbpd; struct dp_usbpd_private *usbpd; + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); if (!dp_usbpd) return; - dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c index 59248aa79136012e8da78ac3169733b9cdbfc3d9..0d722f4591fcf6e1c893613bc850be1ea30a8d96 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c @@ -70,6 +70,7 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl, ctrl->ops.wait_for_cmd_mode_mdp_idle = dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle; ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr; + ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk; switch 
(version) { case DSI_CTRL_VERSION_1_4: diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h index 203a2c9b595cc282c50cdfb8551a85cc63cdfd52..9a63ed89dcae69f74dd4ee59387f3d9e74b7b68e 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h @@ -245,4 +245,6 @@ bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl); void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable, enum dsi_clk_gate_type clk_selection); +void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable); + #endif /* _DSI_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index e8a25aa653c23db18ea06e92b4cd0aa636804cf3..85d1f0c8eff3e0cf63c5fb046ffe55c83d884758 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -2712,6 +2712,16 @@ void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable) mutex_unlock(&dsi_ctrl->ctrl_lock); } +void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable) +{ + if (!dsi_ctrl) + return; + + mutex_lock(&dsi_ctrl->ctrl_lock); + dsi_ctrl->hw.ops.set_continuous_clk(&dsi_ctrl->hw, enable); + mutex_unlock(&dsi_ctrl->ctrl_lock); +} + int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl) { if (!dsi_ctrl) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 5d2dc756e66ec8a3bc0f4d33bf9f5dd8da1148cb..573ac2967adc13e09b19f8e6e7cc036cfa29fd17 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -793,4 +793,11 @@ int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool en); * dsi_ctrl_pixel_format_to_bpp() - returns number of bits per pxl */ int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format); + +/** + * dsi_ctrl_set_continuous_clk() - API to set/unset force 
clock lane HS request. + * @dsi_ctrl: DSI controller handle. + * @enable: variable to control continuous clock. + */ +void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable); #endif /* _DSI_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h index 8c33e44beb0eb80d006d9c2a4db93b67815167eb..458d865c2cf4451a98f4e6df0dea727691d09bb0 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h @@ -822,6 +822,13 @@ struct dsi_ctrl_hw_ops { * @ctrl: Pointer to the controller host hardware. */ int (*wait_for_cmd_mode_mdp_idle)(struct dsi_ctrl_hw *ctrl); + + /** + * hw.ops.set_continuous_clk() - Set continuous clock + * @ctrl: Pointer to the controller host hardware. + * @enable: Bool to control continuous clock request. + */ + void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable); }; /* diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c index 2823402202c3ac4b352d69ff706099b05fad5575..470cc66f0ff177b88b5b3a5392a0e2f752247bb0 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c @@ -476,12 +476,6 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl, /* Disable Timing double buffering */ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0); - if (cfg->force_clk_lane_hs) { - reg = DSI_R32(ctrl, DSI_LANE_CTRL); - reg |= BIT(28); - DSI_W32(ctrl, DSI_LANE_CTRL, reg); - } - pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index); } @@ -1522,3 +1516,16 @@ int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl) return rc; } + +void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable) +{ + u32 reg = 0; + + reg = DSI_R32(ctrl, DSI_LANE_CTRL); + if (enable) + reg |= BIT(28); + else + reg &= ~BIT(28); + DSI_W32(ctrl, DSI_LANE_CTRL, reg); + wmb(); /* make sure request is 
set */ +} diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h index 86fc34134b29ff2b00b2fdcdb91f61fc262987de..60ae85da501668b5ab1f29d982d4594a92f71102 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -421,6 +421,7 @@ struct dsi_mode_info { * @append_tx_eot: Append EOT packets for forward transmissions if set to * true. * @ext_bridge_mode: External bridge is connected. + * @force_hs_clk_lane: Send continuous clock to the panel. */ struct dsi_host_common_cfg { enum dsi_pixel_format dst_format; @@ -440,6 +441,7 @@ struct dsi_host_common_cfg { bool ignore_rx_eot; bool append_tx_eot; bool ext_bridge_mode; + bool force_hs_clk_lane; }; /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index d6cd15307ccd8f51dcf2ecae27427f16b902869e..847553ba412220f2b93c43686e463bbea27276b2 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -933,6 +933,21 @@ int dsi_display_cmd_transfer(struct drm_connector *connector, return rc; } +static void _dsi_display_continuous_clk_ctrl(struct dsi_display *display, + bool enable) +{ + int i; + struct dsi_display_ctrl *ctrl; + + if (!display || !display->panel->host_config.force_hs_clk_lane) + return; + + for (i = 0; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + dsi_ctrl_set_continuous_clk(ctrl->ctrl, enable); + } +} + int dsi_display_soft_reset(void *display) { struct dsi_display *dsi_display; @@ -1000,7 +1015,8 @@ static bool dsi_display_get_cont_splash_status(struct dsi_display *display) struct dsi_display_ctrl *ctrl; struct dsi_ctrl_hw *hw; - for (i = 0; i < display->ctrl_count ; i++) { + for (i = 0; (i < display->ctrl_count) && + (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) { ctrl = &(display->ctrl[i]); if (!ctrl || !ctrl->ctrl) continue; @@ -2995,6 +3011,12 @@ int dsi_pre_clkoff_cb(void *priv, if ((clk & 
DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) && (l_type && DSI_LINK_LP_CLK)) { + /* + * If continuous clock is enabled then disable it + * before entering into ULPS Mode. + */ + if (display->panel->host_config.force_hs_clk_lane) + _dsi_display_continuous_clk_ctrl(display, false); /* * If ULPS feature is enabled, enter ULPS first. * However, when blanking the panel, we should enter ULPS @@ -3146,6 +3168,9 @@ int dsi_post_clkon_cb(void *priv, goto error; } } + + if (display->panel->host_config.force_hs_clk_lane) + _dsi_display_continuous_clk_ctrl(display, true); } /* enable dsi to serve irqs */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c index 0d78feb5b226445d3b8dce0d9ac1fe36817672db..bc485f569cf73400979acb5cd10baa16299c6cba 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c @@ -376,7 +376,7 @@ static int dsi_panel_reset(struct dsi_panel *panel) if (r_config->sequence[i].sleep_ms) usleep_range(r_config->sequence[i].sleep_ms * 1000, - r_config->sequence[i].sleep_ms * 1000); + (r_config->sequence[i].sleep_ms * 1000) + 100); } if (gpio_is_valid(panel->bl_config.en_gpio)) { @@ -1048,6 +1048,8 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host, host->ext_bridge_mode = utils->read_bool(utils->data, "qcom,mdss-dsi-ext-bridge-mode"); + host->force_hs_clk_lane = utils->read_bool(utils->data, + "qcom,mdss-dsi-force-clock-lane-hs"); return 0; } diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h index bf08360e986299225534d3790480575cb07886f6..6b6ada711a434f270b1e53e2408794c7b4400165 100644 --- a/drivers/gpu/drm/msm/sde/sde_ad4.h +++ b/drivers/gpu/drm/msm/sde/sde_ad4.h @@ -49,6 +49,7 @@ enum ad_property { AD_ASSERTIVE, AD_BACKLIGHT, AD_STRENGTH, + AD_ROI, AD_IPC_SUSPEND, AD_IPC_RESUME, AD_IPC_RESET, diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c 
index 3cab0943d5476cfefde18ca356eab42f1bf46b28..265c1c78029f0cc2df264defbf64e69359e2b17e 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -137,6 +137,7 @@ enum { SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS, SDE_CP_CRTC_DSPP_AD_BACKLIGHT, SDE_CP_CRTC_DSPP_AD_STRENGTH, + SDE_CP_CRTC_DSPP_AD_ROI, SDE_CP_CRTC_DSPP_MAX, /* DSPP features end */ @@ -826,6 +827,15 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node, ad_cfg.hw_cfg = &hw_cfg; hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg); break; + case SDE_CP_CRTC_DSPP_AD_ROI: + if (!hw_dspp || !hw_dspp->ops.setup_ad) { + ret = -EINVAL; + continue; + } + ad_cfg.prop = AD_ROI; + ad_cfg.hw_cfg = &hw_cfg; + hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg); + break; default: ret = -EINVAL; break; @@ -1427,6 +1437,11 @@ static void dspp_ad_install_property(struct drm_crtc *crtc) "SDE_DSPP_AD_V4_BACKLIGHT", SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1), 0); + + sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_ROI", + SDE_CP_CRTC_DSPP_AD_ROI, 0, U64_MAX, 0); + sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_ROI, + sizeof(struct drm_msm_ad4_roi_cfg)); break; default: DRM_ERROR("version %d not supported\n", version); @@ -1590,6 +1605,7 @@ static void sde_cp_update_list(struct sde_cp_node *prop_node, case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS: case SDE_CP_CRTC_DSPP_AD_BACKLIGHT: case SDE_CP_CRTC_DSPP_AD_STRENGTH: + case SDE_CP_CRTC_DSPP_AD_ROI: if (dirty_list) list_add_tail(&prop_node->dirty_list, &crtc->ad_dirty); else @@ -1641,6 +1657,9 @@ static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node, case SDE_CP_CRTC_DSPP_AD_STRENGTH: ad_prop = AD_STRENGTH; break; + case SDE_CP_CRTC_DSPP_AD_ROI: + ad_prop = AD_ROI; + break; default: /* Not an AD property */ return 0; diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 707b6b5bc4d856ac5d092d379e8b25a6589ccb5b..3143bdb83c543a8af03e1a2f916915578608269e 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -53,6 +53,7 @@ static const struct drm_prop_enum_list e_topology_control[] = { {SDE_RM_TOPCTL_RESERVE_LOCK, "reserve_lock"}, {SDE_RM_TOPCTL_RESERVE_CLEAR, "reserve_clear"}, {SDE_RM_TOPCTL_DSPP, "dspp"}, + {SDE_RM_TOPCTL_DS, "ds"}, }; static const struct drm_prop_enum_list e_power_mode[] = { {SDE_MODE_DPMS_ON, "ON"}, diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 683ccac7ac59c04f2da5f4b69f4136df8dcf8a44..4b20c80950d1f5ccb4d72efa284313734e551a1f 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -4282,6 +4282,7 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) struct sde_crtc_irq_info *node = NULL; int ret = 0; struct drm_event event; + struct msm_drm_private *priv; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -4289,6 +4290,7 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) } sde_crtc = to_sde_crtc(crtc); cstate = to_sde_crtc_state(crtc->state); + priv = crtc->dev->dev_private; mutex_lock(&sde_crtc->crtc_lock); @@ -4296,6 +4298,12 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) switch (event_type) { case SDE_POWER_EVENT_POST_ENABLE: + /* disable mdp LUT memory retention */ + ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", + CLKFLAG_NORETAIN_MEM); + if (ret) + SDE_ERROR("disable LUT memory retention err %d\n", ret); + /* restore encoder; crtc will be programmed during commit */ drm_for_each_encoder(encoder, crtc->dev) { if (encoder->crtc != crtc) @@ -4328,6 +4336,12 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) } break; case SDE_POWER_EVENT_PRE_DISABLE: + /* enable mdp LUT memory retention */ + ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", + CLKFLAG_RETAIN_MEM); + if (ret) + SDE_ERROR("enable LUT memory retention err %d\n", ret); + drm_for_each_encoder(encoder, crtc->dev) { if 
(encoder->crtc != crtc) continue; @@ -4449,12 +4463,6 @@ static void sde_crtc_disable(struct drm_crtc *crtc) msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&power_on); - /* disable mdp LUT memory retention */ - ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", - CLKFLAG_NORETAIN_MEM); - if (ret) - SDE_ERROR("failed to disable LUT memory retention %d\n", ret); - /* destination scaler if enabled should be reconfigured on resume */ if (cstate->num_ds_enabled) sde_crtc->ds_reconfig = true; @@ -4619,12 +4627,6 @@ static void sde_crtc_enable(struct drm_crtc *crtc, msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&power_on); - /* enable mdp LUT memory retention */ - ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", - CLKFLAG_RETAIN_MEM); - if (ret) - SDE_ERROR("failed to enable LUT memory retention %d\n", ret); - mutex_unlock(&sde_crtc->crtc_lock); spin_lock_irqsave(&sde_crtc->spin_lock, flags); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index efb9217e469a2d23187fca4a976f542b7980fcef..0a47b86a21b9c25b51732d160c362daf76e7bc9f 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -445,6 +445,13 @@ int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc) sde_enc->cur_master->in_clone_mode; } +bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + + return sde_enc && sde_enc->disp_info.is_primary; +} + int sde_encoder_in_cont_splash(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index c6829bc99f012ab71e05c7e7a01d244b656652b2..7c4518f116e2593db52d5280286a9dd562ae9acc 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -281,6 +281,14 @@ void 
sde_encoder_recovery_events_handler(struct drm_encoder *encoder, */ int sde_encoder_in_clone_mode(struct drm_encoder *enc); +/** + * sde_encoder_is_primary_display - checks if underlying display is primary + * display or not. + * @drm_enc: Pointer to drm encoder structure + * @Return: true if it is primary display. false if secondary display + */ +bool sde_encoder_is_primary_display(struct drm_encoder *enc); + /** * sde_encoder_control_idle_pc - control enable/disable of idle power collapse * @drm_enc: Pointer to drm encoder structure diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 771249106a7036e05c343d248e3ddda653230cd2..872d98c5646504419586ffc4ce26113ce671c196 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -381,6 +381,8 @@ struct sde_encoder_phys_cmd_autorefresh { * @rd_ptr_timestamp: last rd_ptr_irq timestamp * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK * @pending_vblank_wq: Wait queue for blocking until VBLANK received + * @ctl_start_threshold: A threshold in microseconds allows command mode + * engine to trigger the retire fence without waiting for rd_ptr. */ struct sde_encoder_phys_cmd { struct sde_encoder_phys base; @@ -392,6 +394,7 @@ struct sde_encoder_phys_cmd { ktime_t rd_ptr_timestamp; atomic_t pending_vblank_cnt; wait_queue_head_t pending_vblank_wq; + u32 ctl_start_threshold; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index fa58126f713d628a53951c0636075a8bc00e49af..ff85985e80e62d329267196ff7e0010ce89eff56 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -333,9 +333,11 @@ static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) * Handle rare cases where the ctl_start_irq is received * after rd_ptr_irq. 
If it falls within a threshold, it is * guaranteed the frame would be picked up in the current TE. - * Signal retire fence immediately in such case. + * Signal retire fence immediately in such case. The threshold + * timer adds extra line time duration based on lowest panel + * fps for qsync enabled case. */ - if ((time_diff_us <= SDE_ENC_CTL_START_THRESHOLD_US) + if ((time_diff_us <= cmd_enc->ctl_start_threshold) && atomic_add_unless( &phys_enc->pending_retire_fence_cnt, -1, 0)) { @@ -896,7 +898,8 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc, } } -static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc) +static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc, + u32 *extra_frame_trigger_time) { struct drm_connector *conn = phys_enc->connector; u32 qsync_mode; @@ -905,6 +908,7 @@ static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc) struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); + *extra_frame_trigger_time = 0; if (!conn || !conn->state) return 0; @@ -957,6 +961,8 @@ static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc) SDE_EVT32(qsync_mode, qsync_min_fps, extra_time_ns, default_fps, yres, threshold_lines); + + *extra_frame_trigger_time = extra_time_ns; } exit: @@ -973,7 +979,7 @@ static void sde_encoder_phys_cmd_tearcheck_config( struct sde_hw_tear_check tc_cfg = { 0 }; struct drm_display_mode *mode; bool tc_enable = true; - u32 vsync_hz; + u32 vsync_hz, extra_frame_trigger_time; struct msm_drm_private *priv; struct sde_kms *sde_kms; @@ -1037,11 +1043,15 @@ static void sde_encoder_phys_cmd_tearcheck_config( */ tc_cfg.sync_cfg_height = 0xFFF0; tc_cfg.vsync_init_val = mode->vdisplay; - tc_cfg.sync_threshold_start = _get_tearcheck_threshold(phys_enc); + tc_cfg.sync_threshold_start = _get_tearcheck_threshold(phys_enc, + &extra_frame_trigger_time); tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE; tc_cfg.start_pos = 
mode->vdisplay; tc_cfg.rd_ptr_irq = mode->vdisplay + 1; + cmd_enc->ctl_start_threshold = (extra_frame_trigger_time / 1000) + + SDE_ENC_CTL_START_THRESHOLD_US; + SDE_DEBUG_CMDENC(cmd_enc, "tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n", phys_enc->hw_pp->idx - PINGPONG_0, @@ -1059,11 +1069,12 @@ static void sde_encoder_phys_cmd_tearcheck_config( tc_cfg.hw_vsync_mode, tc_cfg.vsync_count, tc_cfg.vsync_init_val); SDE_DEBUG_CMDENC(cmd_enc, - "tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u\n", + "tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u ctl_start_threshold:%d\n", phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->hw_intf->idx - INTF_0, tc_cfg.sync_cfg_height, - tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue); + tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue, + cmd_enc->ctl_start_threshold); if (phys_enc->has_intf_te) { phys_enc->hw_intf->ops.setup_tearcheck(phys_enc->hw_intf, @@ -1335,6 +1346,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff( struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); int ret; + u32 extra_frame_trigger_time; if (!phys_enc || !phys_enc->hw_pp) { SDE_ERROR("invalid encoder\n"); @@ -1361,7 +1373,8 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff( if (sde_connector_is_qsync_updated(phys_enc->connector)) { tc_cfg.sync_threshold_start = - _get_tearcheck_threshold(phys_enc); + _get_tearcheck_threshold(phys_enc, + &extra_frame_trigger_time); if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.update_tearcheck) phys_enc->hw_intf->ops.update_tearcheck( @@ -1369,8 +1382,12 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff( else if (phys_enc->hw_pp->ops.update_tearcheck) phys_enc->hw_pp->ops.update_tearcheck( phys_enc->hw_pp, &tc_cfg); + + cmd_enc->ctl_start_threshold = + (extra_frame_trigger_time / 1000) + + SDE_ENC_CTL_START_THRESHOLD_US; SDE_EVT32(DRMID(phys_enc->parent), - tc_cfg.sync_threshold_start); + tc_cfg.sync_threshold_start, 
cmd_enc->ctl_start_threshold); } SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n", @@ -1677,6 +1694,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( phys_enc->enc_spinlock = p->enc_spinlock; phys_enc->vblank_ctl_lock = p->vblank_ctl_lock; cmd_enc->stream_sel = 0; + cmd_enc->ctl_start_threshold = SDE_ENC_CTL_START_THRESHOLD_US; phys_enc->enable_state = SDE_ENC_DISABLED; sde_encoder_phys_cmd_init_ops(&phys_enc->ops); phys_enc->comp_type = p->comp_type; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 72bb1bf47de030577b54bd33478b1fb798c396d7..0825cec04f14cb566d57ee74d40cd3472c81cf61 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -1049,12 +1049,52 @@ static int sde_encoder_phys_vid_prepare_for_kickoff( return rc; } +static void sde_encoder_phys_vid_single_vblank_wait( + struct sde_encoder_phys *phys_enc) +{ + int ret; + struct sde_encoder_phys_vid *vid_enc + = to_sde_encoder_phys_vid(phys_enc); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true); + if (ret) { + SDE_ERROR_VIDENC(vid_enc, + "failed to enable vblank irq: %d\n", + ret); + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, ret, + SDE_EVTLOG_FUNC_CASE1, + SDE_EVTLOG_ERROR); + } else { + ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false); + if (ret) { + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + SDE_ERROR_VIDENC(vid_enc, + "failure waiting for disable: %d\n", + ret); + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, ret, + SDE_EVTLOG_FUNC_CASE2, + SDE_EVTLOG_ERROR); + } + sde_encoder_phys_vid_control_vblank_irq(phys_enc, false); + } +} + static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) { struct msm_drm_private *priv; struct sde_encoder_phys_vid *vid_enc; unsigned long lock_flags; - int ret; + struct intf_status intf_status = {0}; if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev || !phys_enc->parent->dev->dev_private) { @@ -1074,6 +1114,8 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) return; + else if (!sde_encoder_phys_vid_is_master(phys_enc)) + goto exit; if (phys_enc->enable_state == SDE_ENC_DISABLED) { SDE_ERROR("already disabled\n"); @@ -1082,43 +1124,20 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); - if (sde_encoder_phys_vid_is_master(phys_enc)) - sde_encoder_phys_inc_pending(phys_enc); + sde_encoder_phys_inc_pending(phys_enc); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); - if (!sde_encoder_phys_vid_is_master(phys_enc)) - goto exit; + sde_encoder_phys_vid_single_vblank_wait(phys_enc); + if (phys_enc->hw_intf->ops.get_status) + phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, + &intf_status); - /* - * Wait for a vsync so we know the 
ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true); - if (ret) { - SDE_ERROR_VIDENC(vid_enc, - "failed to enable vblank irq: %d\n", - ret); - SDE_EVT32(DRMID(phys_enc->parent), - phys_enc->hw_intf->idx - INTF_0, ret, - SDE_EVTLOG_FUNC_CASE1, - SDE_EVTLOG_ERROR); - } else { - ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false); - if (ret) { - atomic_set(&phys_enc->pending_kickoff_cnt, 0); - SDE_ERROR_VIDENC(vid_enc, - "failure waiting for disable: %d\n", - ret); - SDE_EVT32(DRMID(phys_enc->parent), - phys_enc->hw_intf->idx - INTF_0, ret, - SDE_EVTLOG_FUNC_CASE2, - SDE_EVTLOG_ERROR); - } - sde_encoder_phys_vid_control_vblank_irq(phys_enc, false); + if (intf_status.is_en) { + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + sde_encoder_phys_inc_pending(phys_enc); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + sde_encoder_phys_vid_single_vblank_wait(phys_enc); } sde_encoder_helper_phys_disable(phys_enc, NULL); @@ -1149,11 +1168,15 @@ static void sde_encoder_phys_vid_handle_post_kickoff( * Video encoders need to turn on their interfaces now */ if (phys_enc->enable_state == SDE_ENC_ENABLING) { - SDE_EVT32(DRMID(phys_enc->parent), + if (sde_encoder_phys_vid_is_master(phys_enc)) { + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0); - spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); - spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, + 1); + spin_unlock_irqrestore(phys_enc->enc_spinlock, + lock_flags); + } 
phys_enc->enable_state = SDE_ENC_ENABLED; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index e122fa3c563cf9d0fa1e8bafd70a41d3837485a4..d54bceea1733f46382916d26500683f9a7abfb98 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -1126,8 +1126,9 @@ static void sde_encoder_phys_wb_irq_ctrl( atomic_dec_return(&phys->wbirq_refcount); for (index = 0; index < CRTC_DUAL_MIXERS; index++) - sde_encoder_helper_register_irq(phys, - cwb_irq_tbl[index + pp]); + if (cwb_irq_tbl[index + pp] != SDE_NONE) + sde_encoder_helper_register_irq(phys, + cwb_irq_tbl[index + pp]); } else if (!enable && atomic_dec_return(&phys->wbirq_refcount) == 0) { sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE); @@ -1135,8 +1136,9 @@ static void sde_encoder_phys_wb_irq_ctrl( atomic_inc_return(&phys->wbirq_refcount); for (index = 0; index < CRTC_DUAL_MIXERS; index++) - sde_encoder_helper_unregister_irq(phys, - cwb_irq_tbl[index + pp]); + if (cwb_irq_tbl[index + pp] != SDE_NONE) + sde_encoder_helper_unregister_irq(phys, + cwb_irq_tbl[index + pp]); } } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c index b7b4eba01f8c7ff578fee01d1472be2c96a98277..03a251b6ea10a77a9fedc04d776042e042a3d129 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c @@ -48,6 +48,15 @@ enum ad4_state { ad4_state_max, }; +struct ad4_roi_info { + u32 h_start; + u32 h_end; + u32 v_start; + u32 v_end; + u32 f_in; + u32 f_out; +}; + typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *ad); @@ -77,6 +86,11 @@ static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg); static int ad4_input_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg); +static int ad4_roi_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg); +static int ad4_roi_setup_ipcr(struct sde_hw_dspp *dspp, 
+ struct sde_ad_hw_cfg *cfg); +static int ad4_roi_coordinate_offset(struct sde_hw_cp_cfg *hw_cfg, + struct ad4_roi_info *output); static int ad4_input_setup_idle(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg); static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp, @@ -118,6 +132,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup, [ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup, [ad4_state_idle][AD_STRENGTH] = ad4_strength_setup_idle, + [ad4_state_idle][AD_ROI] = ad4_no_op_setup, [ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup, [ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup, [ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup, @@ -131,6 +146,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_startup][AD_BACKLIGHT] = ad4_backlight_setup, [ad4_state_startup][AD_IPC_SUSPEND] = ad4_no_op_setup, [ad4_state_startup][AD_STRENGTH] = ad4_no_op_setup, + [ad4_state_startup][AD_ROI] = ad4_roi_setup, [ad4_state_startup][AD_IPC_RESUME] = ad4_no_op_setup, [ad4_state_startup][AD_IPC_RESET] = ad4_ipc_reset_setup_startup, @@ -142,6 +158,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup, [ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup, [ad4_state_run][AD_STRENGTH] = ad4_no_op_setup, + [ad4_state_run][AD_ROI] = ad4_roi_setup, [ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run, [ad4_state_run][AD_IPC_RESUME] = ad4_no_op_setup, [ad4_state_run][AD_IPC_RESET] = ad4_setup_debug, @@ -154,6 +171,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_ipcs][AD_ASSERTIVE] = ad4_no_op_setup, [ad4_state_ipcs][AD_BACKLIGHT] = ad4_no_op_setup, [ad4_state_ipcs][AD_STRENGTH] = ad4_no_op_setup, + [ad4_state_ipcs][AD_ROI] = ad4_no_op_setup, [ad4_state_ipcs][AD_IPC_SUSPEND] = ad4_no_op_setup, [ad4_state_ipcs][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcs, 
[ad4_state_ipcs][AD_IPC_RESET] = ad4_no_op_setup, @@ -166,6 +184,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup_ipcr, [ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup_ipcr, [ad4_state_ipcr][AD_STRENGTH] = ad4_no_op_setup, + [ad4_state_ipcr][AD_ROI] = ad4_roi_setup_ipcr, [ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_ipcr, [ad4_state_ipcr][AD_IPC_RESUME] = ad4_no_op_setup, [ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr, @@ -178,6 +197,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = { [ad4_state_manual][AD_ASSERTIVE] = ad4_no_op_setup, [ad4_state_manual][AD_BACKLIGHT] = ad4_no_op_setup, [ad4_state_manual][AD_STRENGTH] = ad4_strength_setup, + [ad4_state_manual][AD_ROI] = ad4_no_op_setup, [ad4_state_manual][AD_IPC_SUSPEND] = ad4_no_op_setup, [ad4_state_manual][AD_IPC_RESUME] = ad4_no_op_setup, [ad4_state_manual][AD_IPC_RESET] = ad4_setup_debug_manual, @@ -191,16 +211,19 @@ struct ad4_info { bool is_master; u32 last_assertive; u32 cached_assertive; + u32 last_str_inroi; + u32 last_str_outroi; u64 last_als; u64 cached_als; u64 last_bl; u64 cached_bl; - u32 last_str; u32 frame_count; u32 frmt_mode; u32 irdx_control_0; u32 tf_ctrl; u32 vc_control_0; + struct ad4_roi_info last_roi_cfg; + struct ad4_roi_info cached_roi_cfg; }; static struct ad4_info info[DSPP_MAX] = { @@ -319,7 +342,7 @@ static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) { - u32 strength = 0; + u32 in_str = 0, out_str = 0; struct sde_hw_mixer *hw_lm; hw_lm = cfg->hw_cfg->mixer_info; @@ -327,9 +350,10 @@ static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) /* this AD core is the salve core */ return 0; - strength = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x4c); - pr_debug("%s(): AD strength = %d\n", __func__, strength); - + in_str = 
SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x4c); + out_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x50); + pr_debug("%s(): AD in strength %d, out strength %d\n", __func__, + in_str, out_str); return 0; } @@ -368,6 +392,18 @@ static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode) info[dspp->idx].cached_bl = U64_MAX; info[dspp->idx].last_als = 0x0; info[dspp->idx].cached_als = U64_MAX; + info[dspp->idx].last_roi_cfg.h_start = 0x0; + info[dspp->idx].last_roi_cfg.h_end = 0xffff; + info[dspp->idx].last_roi_cfg.v_start = 0x0; + info[dspp->idx].last_roi_cfg.v_end = 0xffff; + info[dspp->idx].last_roi_cfg.f_in = 0x400; + info[dspp->idx].last_roi_cfg.f_out = 0x400; + info[dspp->idx].cached_roi_cfg.h_start = U32_MAX; + info[dspp->idx].cached_roi_cfg.h_end = U32_MAX; + info[dspp->idx].cached_roi_cfg.v_start = U32_MAX; + info[dspp->idx].cached_roi_cfg.v_end = U32_MAX; + info[dspp->idx].cached_roi_cfg.f_in = U32_MAX; + info[dspp->idx].cached_roi_cfg.f_out = U32_MAX; } else { if (mode == AD4_MANUAL) { /*vc_control_0 */ @@ -768,15 +804,6 @@ static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) } ad_cfg = cfg->hw_cfg->payload; - blk_offset = 0x18; - val = (ad_cfg->cfg_param_002 & (BIT(16) - 1)); - val |= ((ad_cfg->cfg_param_001 & (BIT(16) - 1)) << 16); - SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); - blk_offset += 4; - val = (ad_cfg->cfg_param_004 & (BIT(16) - 1)); - val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16); - SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); - blk_offset = 0x20; val = (ad_cfg->cfg_param_005 & (BIT(8) - 1)); SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); @@ -793,10 +820,6 @@ static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) blk_offset = 0x3c; val = (ad_cfg->cfg_param_010 & (BIT(12) - 1)); SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); - blk_offset += 4; - val = 
((ad_cfg->cfg_param_011 & (BIT(16) - 1)) << 16); - val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1)); - SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); blk_offset = 0x88; val = (ad_cfg->cfg_param_013 & (BIT(8) - 1)); @@ -891,10 +914,6 @@ static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) info[dspp->idx].vc_control_0 = (ad_cfg->cfg_param_041 & (BIT(7) - 1)); - blk_offset = 0x160; - val = (ad_cfg->cfg_param_043 & (BIT(10) - 1)); - SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); - blk_offset = 0x16c; val = (ad_cfg->cfg_param_044 & (BIT(8) - 1)); SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); @@ -934,6 +953,145 @@ static int ad4_input_setup(struct sde_hw_dspp *dspp, return 0; } +static int ad4_roi_setup(struct sde_hw_dspp *dspp, + struct sde_ad_hw_cfg *cfg) +{ + int ret = 0; + u32 blk_offset = 0, val = 0; + struct ad4_roi_info roi_cfg = {}; + + ret = ad4_roi_coordinate_offset(cfg->hw_cfg, &roi_cfg); + if (ret) { + DRM_ERROR("params invalid\n"); + return -EINVAL; + } + info[dspp->idx].last_roi_cfg = roi_cfg; + + /*roi h start and end*/ + blk_offset = 0x18; + val = (info[dspp->idx].last_roi_cfg.h_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].last_roi_cfg.h_start & (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + + /*roi v start and end*/ + blk_offset += 4; + val = (info[dspp->idx].last_roi_cfg.v_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].last_roi_cfg.v_start & (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + + /*roi factor in and out*/ + blk_offset = 0x40; + val = ((info[dspp->idx].last_roi_cfg.f_in & (BIT(16) - 1)) << 16); + val |= (info[dspp->idx].last_roi_cfg.f_out & (BIT(16) - 1)); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + + return ret; +} + +static int ad4_roi_setup_ipcr(struct sde_hw_dspp *dspp, + struct sde_ad_hw_cfg *cfg) +{ + int ret = 0; 
+ struct ad4_roi_info roi_cfg = {}; + + ret = ad4_roi_coordinate_offset(cfg->hw_cfg, &roi_cfg); + if (ret) { + DRM_ERROR("params invalid\n"); + return -EINVAL; + } + + info[dspp->idx].cached_roi_cfg = roi_cfg; + + return 0; +} + +static int ad4_roi_coordinate_offset(struct sde_hw_cp_cfg *hw_cfg, + struct ad4_roi_info *output) +{ + struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info; + struct drm_msm_ad4_roi_cfg *roi = NULL; + + if (!hw_cfg->payload) { + output->h_start = 0x0; + output->h_end = hw_cfg->displayh; + output->v_start = 0x0; + output->v_end = hw_cfg->displayv; + output->f_in = 0x400; + output->f_out = 0x400; + return 0; + } + + if (hw_cfg->len != sizeof(struct drm_msm_ad4_roi_cfg)) { + DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n", + sizeof(struct drm_msm_ad4_roi_cfg), hw_cfg->len, + hw_cfg->payload); + return -EINVAL; + } + roi = (struct drm_msm_ad4_roi_cfg *)hw_cfg->payload; + + if (roi->h_x >= hw_cfg->displayh || roi->v_x >= hw_cfg->displayv) { + DRM_ERROR("invalid roi=[%u,%u,%u,%u], display=[%u,%u]\n", + roi->h_x, roi->h_y, roi->v_x, roi->v_y, + hw_cfg->displayh, hw_cfg->displayv); + return -EINVAL; + } + + if (roi->h_x >= roi->h_y || roi->v_x >= roi->v_y) { + DRM_ERROR("invalid roi=[%u,%u,%u,%u], display=[%u,%u]\n", + roi->h_x, roi->h_y, roi->v_x, roi->v_y, + hw_cfg->displayh, hw_cfg->displayv); + return -EINVAL; + } + + if (roi->h_y > hw_cfg->displayh) + roi->h_y = hw_cfg->displayh; + + if (roi->v_y > hw_cfg->displayv) + roi->v_y = hw_cfg->displayv; + + /* single dspp cfg */ + output->h_start = roi->h_x; + output->h_end = roi->h_y; + output->v_start = roi->v_x; + output->v_end = roi->v_y; + output->f_in = roi->factor_in; + output->f_out = roi->factor_out; + + /* check whether dual dspp */ + if (hw_cfg->num_of_mixers != 2) + return 0; + + if (roi->h_y <= hw_lm->cfg.out_width) { + if (hw_lm->cfg.right_mixer) { + /* the region on the left of screen, clear right info */ + output->h_start = 0; + output->h_end = 0; + output->v_start = 0; + 
output->v_end = 0; + } + } else if (roi->h_x < hw_lm->cfg.out_width) { + /* the region occupy both sides of screen: left and right */ + if (hw_lm->cfg.right_mixer) { + output->h_start = 0; + output->h_end -= hw_lm->cfg.out_width; + } else { + output->h_end = hw_lm->cfg.out_width; + } + } else { + /* the region on the right of the screen*/ + if (hw_lm->cfg.right_mixer) { + output->h_start -= hw_lm->cfg.out_width; + output->h_end -= hw_lm->cfg.out_width; + } else { + output->h_start = 0; + output->h_end = 0; + output->v_start = 0; + output->v_end = 0; + } + } + return 0; +} + static int ad4_suspend_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) { @@ -1335,7 +1493,7 @@ void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) { - u32 strength = 0, i = 0; + u32 in_str = 0, out_str = 0, i = 0; struct sde_hw_mixer *hw_lm; hw_lm = cfg->hw_cfg->mixer_info; @@ -1343,16 +1501,21 @@ static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp, /* this AD core is the salve core */ for (i = DSPP_0; i < DSPP_MAX; i++) { if (info[i].is_master) { - strength = info[i].last_str; + in_str = info[i].last_str_inroi; + out_str = info[i].last_str_outroi; break; } } } else { - strength = SDE_REG_READ(&dspp->hw, + in_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x4c); - pr_debug("%s(): AD strength = %d\n", __func__, strength); + out_str = SDE_REG_READ(&dspp->hw, + dspp->cap->sblk->ad.base + 0x50); + pr_debug("%s(): AD in strength %d, out %d\n", __func__, + in_str, out_str); } - info[dspp->idx].last_str = strength; + info[dspp->idx].last_str_inroi = in_str; + info[dspp->idx].last_str_outroi = out_str; info[dspp->idx].state = ad4_state_ipcs; pr_debug("%s(): AD state move to ipcs\n", __func__); @@ -1378,10 +1541,29 @@ static int ad4_ipc_resume_setup_ipcs(struct sde_hw_dspp *dspp, blk_offset = 0x34; val = (0x55 & (BIT(8) - 1)); SDE_REG_WRITE(&dspp->hw, 
dspp->cap->sblk->ad.base + blk_offset, val); + + /* set roi config */ + blk_offset = 0x18; + val = (info[dspp->idx].last_roi_cfg.h_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].last_roi_cfg.h_start & (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + blk_offset += 4; + val = (info[dspp->idx].last_roi_cfg.v_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].last_roi_cfg.v_start & (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + blk_offset = 0x40; + val = ((info[dspp->idx].last_roi_cfg.f_in & (BIT(16) - 1)) << 16); + val |= (info[dspp->idx].last_roi_cfg.f_out & (BIT(16) - 1)); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + /* set manual strength */ blk_offset = 0x15c; - val = (info[dspp->idx].last_str & (BIT(10) - 1)); + val = (info[dspp->idx].last_str_inroi & (BIT(10) - 1)); SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + blk_offset = 0x160; + val = (info[dspp->idx].last_str_outroi & (BIT(10) - 1)); + SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val); + /* enable manual mode */ blk_offset = 0x138; SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0); @@ -1401,7 +1583,7 @@ static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) { int ret; - u32 strength = 0, i = 0; + u32 in_str = 0, out_str = 0, i = 0; struct sde_hw_mixer *hw_lm; /* Read AD calculator strength output during the 2 frames of manual @@ -1414,20 +1596,25 @@ static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp, /* this AD core is the salve core */ for (i = DSPP_0; i < DSPP_MAX; i++) { if (info[i].is_master) { - strength = info[i].last_str; + in_str = info[i].last_str_inroi; + out_str = info[i].last_str_outroi; break; } } } else { - strength = SDE_REG_READ(&dspp->hw, + in_str = SDE_REG_READ(&dspp->hw, dspp->cap->sblk->ad.base + 0x4c); - pr_debug("%s(): AD strength = %d\n", __func__, 
strength); + out_str = SDE_REG_READ(&dspp->hw, + dspp->cap->sblk->ad.base + 0x50); + pr_debug("%s(): AD in strength %d, out %d\n", __func__, + in_str, out_str); } if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) { info[dspp->idx].state = ad4_state_run; pr_debug("%s(): AD state move to run\n", __func__); - info[dspp->idx].last_str = strength; + info[dspp->idx].last_str_inroi = in_str; + info[dspp->idx].last_str_outroi = out_str; ret = ad4_cfg_ipc_reset(dspp, cfg); if (ret) return ret; @@ -1441,7 +1628,7 @@ static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp, static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg) { - u32 blk_offset; + u32 blk_offset, val = 0; /* revert manual strength */ /* tf control */ @@ -1477,6 +1664,35 @@ static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp, info[dspp->idx].cached_assertive = U8_MAX; } + /*reset cached roi config*/ + if (info[dspp->idx].cached_roi_cfg.h_start != U32_MAX) { + blk_offset = 0x18; + val = (info[dspp->idx].cached_roi_cfg.h_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].cached_roi_cfg.h_start & + (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, + dspp->cap->sblk->ad.base + blk_offset, val); + blk_offset += 4; + val = (info[dspp->idx].cached_roi_cfg.v_end & (BIT(16) - 1)); + val |= ((info[dspp->idx].cached_roi_cfg.v_start & + (BIT(16) - 1)) << 16); + SDE_REG_WRITE(&dspp->hw, + dspp->cap->sblk->ad.base + blk_offset, val); + blk_offset = 0x40; + val = ((info[dspp->idx].cached_roi_cfg.f_in & + (BIT(16) - 1)) << 16); + val |= (info[dspp->idx].cached_roi_cfg.f_out & (BIT(16) - 1)); + SDE_REG_WRITE(&dspp->hw, + dspp->cap->sblk->ad.base + blk_offset, val); + + info[dspp->idx].last_roi_cfg = info[dspp->idx].cached_roi_cfg; + info[dspp->idx].cached_roi_cfg.h_start = U32_MAX; + info[dspp->idx].cached_roi_cfg.h_end = U32_MAX; + info[dspp->idx].cached_roi_cfg.v_start = U32_MAX; + info[dspp->idx].cached_roi_cfg.v_end = U32_MAX; + info[dspp->idx].cached_roi_cfg.f_in = U32_MAX; + 
info[dspp->idx].cached_roi_cfg.f_out = U32_MAX; + } return 0; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 041a59144208ab6eb3c4809f444eb47be066bf7a..1007702196a6ce06a9413e05aa77fd47353676bf 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -3622,7 +3622,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) sde_cfg->delay_prg_fetch_start = true; sde_cfg->sui_ns_allowed = true; sde_cfg->sui_misr_supported = true; - sde_cfg->sui_block_xin_mask = 0x2EE3; + sde_cfg->sui_block_xin_mask = 0x2EE1; } else { SDE_ERROR("unsupported chipset id:%X\n", hw_rev); sde_cfg->perf.min_prefill_lines = 0xffff; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 4c247a63ca3789ee6a11ed0f2f6e424acaae76cb..acb50b86e0232d0daa644e77a335cd23826bf2de 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -61,6 +61,7 @@ #define CTL_SSPP_MAX_RECTS 2 #define SDE_REG_RESET_TIMEOUT_US 2000 +#define SDE_REG_WAIT_RESET_TIMEOUT_US 100000 #define UPDATE_MASK(m, idx, en) \ ((m) = (en) ? 
((m) | BIT((idx))) : ((m) & ~BIT((idx)))) @@ -752,7 +753,7 @@ static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx) return 0; pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx); - if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US)) { + if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_WAIT_RESET_TIMEOUT_US)) { pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 2036ef8b106dfb2725c04fe60f8ee8e524e5b252..51129920555a211d793c58ca88fdbb091205577b 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -69,6 +69,7 @@ #define INTF_MISR_SIGNATURE 0x184 #define INTF_MUX 0x25C +#define INTF_STATUS 0x26C #define INTF_AVR_CONTROL 0x270 #define INTF_AVR_MODE 0x274 #define INTF_AVR_TRIGGER 0x278 @@ -387,6 +388,21 @@ static void sde_hw_intf_get_status( } } +static void sde_hw_intf_v1_get_status( + struct sde_hw_intf *intf, + struct intf_status *s) +{ + struct sde_hw_blk_reg_map *c = &intf->hw; + + s->is_en = SDE_REG_READ(c, INTF_STATUS) & BIT(0); + if (s->is_en) { + s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT); + s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT); + } else { + s->line_count = 0; + s->frame_count = 0; + } +} static void sde_hw_intf_setup_misr(struct sde_hw_intf *intf, bool enable, u32 frame_count) { @@ -625,6 +641,7 @@ static void _setup_intf_ops(struct sde_hw_intf_ops *ops, ops->get_autorefresh = sde_hw_intf_get_autorefresh_config; ops->poll_timeout_wr_ptr = sde_hw_intf_poll_timeout_wr_ptr; ops->vsync_sel = sde_hw_intf_vsync_sel; + ops->get_status = sde_hw_intf_v1_get_status; } } diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c index d806c5a4a1960d4b212c400432ce97f1fa44611d..bd5938570dee6ab15a1f8855fdf0bb3f3d0a1a7d 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.c +++ b/drivers/gpu/drm/msm/sde/sde_rm.c @@ -1438,14 +1438,18 @@ static int 
_sde_rm_populate_requirements( return -EINVAL; } - /** - * Set the requirement based on caps if not set from user space - * This will ensure to select LM tied with DS blocks - * Currently, DS blocks are tied with LM 0 and LM 1 (primary display) + /* + * select dspp HW block for all dsi displays and ds for only + * primary dsi display. */ - if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler && - conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) - reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS); + if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) { + if (!RM_RQ_DSPP(reqs)) + reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP); + + if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler && + sde_encoder_is_primary_display(enc)) + reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS); + } /** * Set the requirement for LM which has CWB support if CWB is diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c index d9771b4636c1fe53677d8d9d03e611596a1a4c57..9c34e18aa53787a724b7fab58368909f3bf18a46 100644 --- a/drivers/gpu/drm/msm/sde_io_util.c +++ b/drivers/gpu/drm/msm/sde_io_util.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2012-2015, 2017-2018 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2015, 2017, 2018, The Linux Foundation. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -242,7 +243,7 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable) need_sleep = !regulator_is_enabled(in_vreg[i].vreg); if (in_vreg[i].pre_on_sleep && need_sleep) usleep_range(in_vreg[i].pre_on_sleep * 1000, - in_vreg[i].pre_on_sleep * 1000); + (in_vreg[i].pre_on_sleep * 1000) + 10); rc = regulator_set_load(in_vreg[i].vreg, in_vreg[i].enable_load); if (rc < 0) { @@ -254,7 +255,7 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable) rc = regulator_enable(in_vreg[i].vreg); if (in_vreg[i].post_on_sleep && need_sleep) usleep_range(in_vreg[i].post_on_sleep * 1000, - in_vreg[i].post_on_sleep * 1000); + (in_vreg[i].post_on_sleep * 1000) + 10); if (rc < 0) { DEV_ERR("%pS->%s: %s enable failed\n", __builtin_return_address(0), __func__, @@ -277,13 +278,13 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable) } if (in_vreg[i].pre_off_sleep) usleep_range(in_vreg[i].pre_off_sleep * 1000, - in_vreg[i].pre_off_sleep * 1000); + (in_vreg[i].pre_off_sleep * 1000) + 10); regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load); regulator_disable(in_vreg[i].vreg); if (in_vreg[i].post_off_sleep) usleep_range(in_vreg[i].post_off_sleep * 1000, - in_vreg[i].post_off_sleep * 1000); + (in_vreg[i].post_off_sleep * 1000) + 10); } } return rc; @@ -295,13 +296,13 @@ int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable) for (i--; i >= 0; i--) { if (in_vreg[i].pre_off_sleep) usleep_range(in_vreg[i].pre_off_sleep * 1000, - in_vreg[i].pre_off_sleep * 1000); + (in_vreg[i].pre_off_sleep * 1000) + 10); regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load); regulator_disable(in_vreg[i].vreg); if (in_vreg[i].post_off_sleep) usleep_range(in_vreg[i].post_off_sleep * 1000, - in_vreg[i].post_off_sleep * 1000); + (in_vreg[i].post_off_sleep * 1000) + 10); } 
return rc; diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index 54350e72a9f2fa7b9f156969d8ebf94f393f46bb..b1f075ce037d63868502d051e685c88f552b7e2e 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -14,6 +14,7 @@ msm_kgsl_core-y = \ kgsl_pool.o \ kgsl_gmu_core.o \ kgsl_gmu.o \ + kgsl_rgmu.o \ kgsl_hfi.o msm_kgsl_core-$(CONFIG_QCOM_KGSL_IOMMU) += kgsl_iommu.o @@ -41,6 +42,7 @@ msm_adreno-y += \ adreno_a5xx_preempt.o \ adreno_a6xx_preempt.o \ adreno_a6xx_gmu.o \ + adreno_a6xx_rgmu.o \ adreno_sysfs.o \ adreno.o \ adreno_cp_parser.o \ diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index 41854d22743fa2e4058b3ed59bde7a99d40091f6..9c1e798367b23ada33045134b420611a2853d5f5 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -412,11 +412,13 @@ #define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f #define A6XX_RBBM_INT_CLEAR_CMD 0x00037 #define A6XX_RBBM_INT_0_MASK 0x00038 +#define A6XX_RBBM_INT_2_MASK 0x0003A #define A6XX_RBBM_SP_HYST_CNT 0x00042 #define A6XX_RBBM_SW_RESET_CMD 0x00043 #define A6XX_RBBM_RAC_THRESHOLD_CNT 0x00044 #define A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00045 #define A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00046 +#define A6XX_RBBM_BLOCK_GX_RETENTION_CNTL 0x00050 #define A6XX_RBBM_CLOCK_CNTL 0x000ae #define A6XX_RBBM_CLOCK_CNTL_SP0 0x000b0 #define A6XX_RBBM_CLOCK_CNTL_SP1 0x000b1 @@ -1026,6 +1028,11 @@ #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15 #define A6XX_GMU_AO_SPARE_CNTL 0x23B16 +/* RGMU GLM registers */ +#define A6XX_GMU_AO_RGMU_GLM_SLEEP_CTRL 0x23B80 +#define A6XX_GMU_AO_RGMU_GLM_SLEEP_STATUS 0x23B81 +#define A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE 0x23B82 + /* GMU RSC control registers */ #define A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x23404 #define A6XX_GMU_RSCC_CONTROL_REQ 0x23B07 @@ -1054,9 +1061,9 @@ #define A6XX_RSCC_SEQ_BUSY_DRV0 0x23501 #define A6XX_RSCC_SEQ_MEM_0_DRV0 0x23580 #define A6XX_RSCC_TCS0_DRV0_STATUS 0x23746 -#define A6XX_RSCC_TCS1_DRV0_STATUS 0x238AE -#define 
A6XX_RSCC_TCS2_DRV0_STATUS 0x23A16 -#define A6XX_RSCC_TCS3_DRV0_STATUS 0x23B7E +#define A6XX_RSCC_TCS1_DRV0_STATUS 0x237EE +#define A6XX_RSCC_TCS2_DRV0_STATUS 0x23896 +#define A6XX_RSCC_TCS3_DRV0_STATUS 0x2393E /* GPU PDC sequencer registers in AOSS.RPMh domain */ #define PDC_GPU_ENABLE_PDC 0x1140 @@ -1093,5 +1100,16 @@ */ #define PDC_GPU_SEQ_MEM_0 0x0 +/* RGMU(PCC) registers in A6X_GMU_CX_0_NON_CONTEXT_DEC domain */ +#define A6XX_RGMU_CX_INTR_GEN_EN 0x1F80F +#define A6XX_RGMU_CX_RGMU_TIMER0 0x1F834 +#define A6XX_RGMU_CX_RGMU_TIMER1 0x1F835 +#define A6XX_RGMU_CX_PCC_CTRL 0x1F838 +#define A6XX_RGMU_CX_PCC_INIT_RESULT 0x1F839 +#define A6XX_RGMU_CX_PCC_BKPT_CFG 0x1F83A +#define A6XX_RGMU_CX_PCC_BKPT_ADDR 0x1F83B +#define A6XX_RGMU_CX_PCC_STATUS 0x1F83C +#define A6XX_RGMU_CX_PCC_DEBUG 0x1F83D + #endif /* _A6XX_REG_H */ diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 3a2bc36c7515738eb198c9a750844c7745776628..91991593982923bcae150a448b04704b785615fc 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -368,7 +368,27 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 5, .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC, + ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | + ADRENO_IOCOHERENT, + .sqefw_name = "a630_sqe.fw", + .zap_name = "a615_zap", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = SZ_512K, + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a630_gmu.bin", + .gpmu_major = 0x1, + .gpmu_minor = 0x003, + }, + { + .gpurev = ADRENO_REV_A618, + .core = 6, + .major = 1, + .minor = 8, + .patchid = ANY_ID, + .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION | + ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | + ADRENO_IOCOHERENT, .sqefw_name = "a630_sqe.fw", .zap_name = "a615_zap", .gpudev = &adreno_a6xx_gpudev, @@ -387,7 +407,7 @@ static const 
struct adreno_gpu_core adreno_gpulist[] = { .patchid = 0, .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT | - ADRENO_IFPC | ADRENO_ACD, + ADRENO_IFPC, .sqefw_name = "a630_sqe.fw", .zap_name = "a640_zap", .gpudev = &adreno_a6xx_gpudev, @@ -409,7 +429,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT | - ADRENO_IFPC, + ADRENO_IFPC | ADRENO_PREEMPTION, .sqefw_name = "a630_sqe.fw", .zap_name = "a640_zap", .gpudev = &adreno_a6xx_gpudev, @@ -448,12 +468,33 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 8, .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION | - ADRENO_IOCOHERENT | ADRENO_PREEMPTION, + ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU, .sqefw_name = "a630_sqe.fw", .zap_name = "a608_zap", .gpudev = &adreno_a6xx_gpudev, .gmem_size = (SZ_128K + SZ_4K), .num_protected_regs = 0x20, .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a608_rgmu.bin", + .cx_ipeak_gpu_freq = 745000000, + }, + { + .gpurev = ADRENO_REV_A616, + .core = 6, + .major = 1, + .minor = 6, + .patchid = ANY_ID, + .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_PREEMPTION | + ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IFPC | + ADRENO_IOCOHERENT, + .sqefw_name = "a630_sqe.fw", + .zap_name = "a615_zap", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = SZ_512K, + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a630_gmu.bin", + .gpmu_major = 0x1, + .gpmu_minor = 0x003, }, }; diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index fd22fa6d2e276834085a5b73d2c8b3cb21fe976d..f4174e06fba23d1ef17a8d5e80166688d7cee438 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1092,6 +1092,7 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev, struct device_node *node = pdev->dev.of_node; struct resource *res; 
unsigned int timeout; + unsigned int throt = 4; if (of_property_read_string(node, "label", &pdev->name)) { KGSL_CORE_ERR("Unable to read 'label'\n"); @@ -1120,6 +1121,14 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev, if (adreno_of_get_pwrlevels(adreno_dev, node)) return -EINVAL; + /* Get throttle power level */ + of_property_read_u32(node, "qcom,throttle-pwrlevel", &throt); + + if (throt < device->pwrctrl.num_pwrlevels) + device->pwrctrl.throttle_mask = + GENMASK(device->pwrctrl.num_pwrlevels - 1, + device->pwrctrl.num_pwrlevels - 1 - throt); + /* Get context aware DCVS properties */ adreno_of_get_ca_aware_properties(adreno_dev, node); @@ -1385,21 +1394,19 @@ static int adreno_probe(struct platform_device *pdev) /* Get the system cache slice descriptor for GPU */ adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu"); - if (IS_ERR(adreno_dev->gpu_llc_slice)) { + if (IS_ERR(adreno_dev->gpu_llc_slice) && + PTR_ERR(adreno_dev->gpu_llc_slice) != -ENOENT) KGSL_DRV_WARN(device, "Failed to get GPU LLC slice descriptor %ld\n", - PTR_ERR(adreno_dev->gpu_llc_slice)); - adreno_dev->gpu_llc_slice = NULL; - } + PTR_ERR(adreno_dev->gpu_llc_slice)); /* Get the system cache slice descriptor for GPU pagetables */ adreno_dev->gpuhtw_llc_slice = adreno_llc_getd(&pdev->dev, "gpuhtw"); - if (IS_ERR(adreno_dev->gpuhtw_llc_slice)) { + if (IS_ERR(adreno_dev->gpuhtw_llc_slice) && + PTR_ERR(adreno_dev->gpuhtw_llc_slice) != -ENOENT) KGSL_DRV_WARN(device, "Failed to get gpuhtw LLC slice descriptor %ld\n", - PTR_ERR(adreno_dev->gpuhtw_llc_slice)); - adreno_dev->gpuhtw_llc_slice = NULL; - } + PTR_ERR(adreno_dev->gpuhtw_llc_slice)); #ifdef CONFIG_INPUT if (!device->pwrctrl.input_disable) { @@ -1476,10 +1483,8 @@ static int adreno_remove(struct platform_device *pdev) adreno_profile_close(adreno_dev); /* Release the system cache slice descriptor */ - if (adreno_dev->gpu_llc_slice) - adreno_llc_putd(adreno_dev->gpu_llc_slice); - if (adreno_dev->gpuhtw_llc_slice) - 
adreno_llc_putd(adreno_dev->gpuhtw_llc_slice); + adreno_llc_putd(adreno_dev->gpu_llc_slice); + adreno_llc_putd(adreno_dev->gpuhtw_llc_slice); kgsl_pwrscale_close(device); @@ -1867,7 +1872,7 @@ static int _adreno_start(struct adreno_device *adreno_dev) status = kgsl_mmu_start(device); if (status) - goto error_pwr_off; + goto error_boot_oob_clear; status = adreno_ocmem_malloc(adreno_dev); if (status) { @@ -2091,6 +2096,11 @@ static int _adreno_start(struct adreno_device *adreno_dev) error_mmu_off: kgsl_mmu_stop(&device->mmu); +error_boot_oob_clear: + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear) && + ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) + gmu_dev_ops->oob_clear(adreno_dev, oob_boot_slumber); + error_pwr_off: /* set the state back to original state */ kgsl_pwrctrl_change_state(device, state); @@ -2163,10 +2173,8 @@ static int adreno_stop(struct kgsl_device *device) adreno_ocmem_free(adreno_dev); - if (adreno_dev->gpu_llc_slice) - adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice); - if (adreno_dev->gpuhtw_llc_slice) - adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice); + adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice); + adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice); /* Save active coresight registers if applicable */ adreno_coresight_stop(adreno_dev); @@ -2174,6 +2182,9 @@ static int adreno_stop(struct kgsl_device *device) /* Save physical performance counter values before GPU power down*/ adreno_perfcounter_save(adreno_dev); + if (GMU_DEV_OP_VALID(gmu_dev_ops, prepare_stop)) + gmu_dev_ops->prepare_stop(adreno_dev); + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 22bcc2b87c6fa61376b335f3523688b0d84a66a1..4706b3deab6c6f8905a984ad0f9ce967105f3769 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -219,6 +219,8 @@ enum adreno_gpurev { ADRENO_REV_A540 = 540, ADRENO_REV_A608 = 608, 
ADRENO_REV_A615 = 615, + ADRENO_REV_A616 = 616, + ADRENO_REV_A618 = 618, ADRENO_REV_A630 = 630, ADRENO_REV_A640 = 640, ADRENO_REV_A680 = 680, @@ -1283,11 +1285,22 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev) } ADRENO_TARGET(a608, ADRENO_REV_A608) -ADRENO_TARGET(a615, ADRENO_REV_A615) ADRENO_TARGET(a630, ADRENO_REV_A630) ADRENO_TARGET(a640, ADRENO_REV_A640) ADRENO_TARGET(a680, ADRENO_REV_A680) +/* + * All the derived chipsets from A615 needs to be added to this + * list such as A616, A618 etc. + */ +static inline int adreno_is_a615_family(struct adreno_device *adreno_dev) +{ + unsigned int rev = ADRENO_GPUREV(adreno_dev); + + return (rev == ADRENO_REV_A615 || rev == ADRENO_REV_A616 || + rev == ADRENO_REV_A618); +} + static inline int adreno_is_a630v1(struct adreno_device *adreno_dev) { return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) && @@ -1928,7 +1941,7 @@ static inline void adreno_perfcntr_active_oob_put( static inline bool adreno_has_sptprac_gdsc(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev) || adreno_is_a630(adreno_dev)) + if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) return true; else return false; diff --git a/drivers/gpu/msm/adreno_a4xx_preempt.c b/drivers/gpu/msm/adreno_a4xx_preempt.c index 058ac9c2cf4b28beaf8e454aae7fdc1d7f137ecb..75eb5a903c562a942da90d10076efb2ea02fe32f 100644 --- a/drivers/gpu/msm/adreno_a4xx_preempt.c +++ b/drivers/gpu/msm/adreno_a4xx_preempt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -305,57 +305,15 @@ static void a4xx_preempt_trig_state(struct adreno_device *adreno_dev) static struct adreno_ringbuffer *a4xx_next_ringbuffer( struct adreno_device *adreno_dev) { - struct adreno_ringbuffer *rb, *next = NULL; + struct adreno_ringbuffer *rb; int i; FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - if (!adreno_rb_empty(rb) && next == NULL) { - next = rb; - continue; - } - - if (!adreno_disp_preempt_fair_sched) - continue; - - switch (rb->starve_timer_state) { - case ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT: - if (!adreno_rb_empty(rb) && - adreno_dev->cur_rb != rb) { - rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT; - rb->sched_timer = jiffies; - } - break; - case ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT: - if (time_after(jiffies, rb->sched_timer + - msecs_to_jiffies( - adreno_dispatch_starvation_time))) { - rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED; - /* halt dispatcher to remove starvation */ - adreno_get_gpu_halt(adreno_dev); - } - break; - case ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED: - /* - * If the RB has not been running for the minimum - * time slice then allow it to run - */ - if (!adreno_rb_empty(rb) && time_before(jiffies, - adreno_dev->cur_rb->sched_timer + - msecs_to_jiffies(adreno_dispatch_time_slice))) - next = rb; - else - rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT; - break; - case ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED: - default: - break; - } + if (!adreno_rb_empty(rb)) + return rb; } - return next; + return NULL; } static void a4xx_preempt_clear_state(struct adreno_device *adreno_dev) @@ -490,27 +448,6 @@ static void a4xx_preempt_complete_state(struct adreno_device *adreno_dev) adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF; adreno_dev->next_rb = NULL; - if (adreno_disp_preempt_fair_sched) { - /* starved rb is now 
scheduled so unhalt dispatcher */ - if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED == - adreno_dev->cur_rb->starve_timer_state) - adreno_put_gpu_halt(adreno_dev); - adreno_dev->cur_rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED; - adreno_dev->cur_rb->sched_timer = jiffies; - /* - * If the outgoing RB is has commands then set the - * busy time for it - */ - if (!adreno_rb_empty(adreno_dev->prev_rb)) { - adreno_dev->prev_rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT; - adreno_dev->prev_rb->sched_timer = jiffies; - } else { - adreno_dev->prev_rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT; - } - } adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); prevrptr = adreno_get_rptr(adreno_dev->prev_rb); diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index fc5f28dca824cd994882917085221a3fc829155c..d72ce8236f752201a8f978ac7e3abae6b3aa17a4 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -66,7 +66,7 @@ static const struct adreno_vbif_data a640_gbif[] = { static const struct adreno_vbif_platform a6xx_vbif_platforms[] = { { adreno_is_a630, a630_vbif }, - { adreno_is_a615, a615_gbif }, + { adreno_is_a615_family, a615_gbif }, { adreno_is_a640, a640_gbif }, { adreno_is_a680, a640_gbif }, { adreno_is_a608, a615_gbif }, @@ -360,7 +360,7 @@ static const struct { unsigned int count; } a6xx_hwcg_registers[] = { {adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}, - {adreno_is_a615, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)}, + {adreno_is_a615_family, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)}, {adreno_is_a640, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, {adreno_is_a680, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, {adreno_is_a608, a608_hwcg_regs, ARRAY_SIZE(a608_hwcg_regs)}, @@ -610,7 +610,7 @@ __get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a608(adreno_dev)) return 0x00000022; - else if (adreno_is_a615(adreno_dev)) + 
else if (adreno_is_a615_family(adreno_dev)) return 0x00000222; else return 0x00020202; @@ -621,7 +621,7 @@ __get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a608(adreno_dev)) return 0x00000011; - else if (adreno_is_a615(adreno_dev)) + else if (adreno_is_a615_family(adreno_dev)) return 0x00000111; else return 0x00010111; @@ -632,7 +632,7 @@ __get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a608(adreno_dev)) return 0x00000055; - else if (adreno_is_a615(adreno_dev)) + else if (adreno_is_a615_family(adreno_dev)) return 0x00000555; else return 0x00005555; @@ -746,7 +746,7 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev) + sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist)); - if (adreno_is_a615(adreno_dev) || adreno_is_a608(adreno_dev)) { + if (adreno_is_a615_family(adreno_dev) || adreno_is_a608(adreno_dev)) { for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) { r = &a615_pwrup_reglist[i]; kgsl_regread(KGSL_DEVICE(adreno_dev), @@ -854,6 +854,13 @@ static void a6xx_start(struct adreno_device *adreno_dev) /* Turn on performance counters */ kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1); + /* Turn on GX_MEM retention */ + if (gmu_core_isenabled(device) && adreno_is_a608(adreno_dev)) { + kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB); + /* For CP IPC interrupt */ + kgsl_regwrite(device, A6XX_RBBM_INT_2_MASK, 0x00000010); + } + if (of_property_read_u32(device->pdev->dev.of_node, "qcom,highest-bank-bit", &bit)) bit = MIN_HBB; @@ -901,9 +908,9 @@ static void a6xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) | (mal << 23) | (bit << 21)); - /* Set hang detection threshold to 0x1FFFFF * 16 cycles */ + /* Set hang detection threshold to 0x3FFFFF * 16 cycles */ kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL, - (1 << 30) | 0x1fffff); + (1 << 30) | 0x3fffff); kgsl_regwrite(device, 
A6XX_UCHE_CLIENT_PF, 1); @@ -1417,7 +1424,7 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev) static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev) { int i; - int64_t adj = 0; + int64_t adj = -1; uint32_t counts[ADRENO_GPMU_THROTTLE_COUNTERS]; struct adreno_busy_data *busy = &adreno_dev->busy_data; @@ -1433,12 +1440,12 @@ static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev) /* * The adjustment is the number of cycles lost to throttling, which * is calculated as a weighted average of the cycles throttled - * at 10%, 50%, and 90%. The adjustment is negative because in A6XX, + * at 15%, 50%, and 90%. The adjustment is negative because in A6XX, * the busy count includes the throttled cycles. Therefore, we want * to remove them to prevent appearing to be busier than * we actually are. */ - adj = -((counts[0] * 1) + (counts[1] * 5) + (counts[2] * 9)) / 10; + adj *= ((counts[0] * 15) + (counts[1] * 50) + (counts[2] * 90)) / 100; trace_kgsl_clock_throttling(0, counts[1], counts[2], counts[0], adj); @@ -1798,14 +1805,6 @@ static struct adreno_irq a6xx_irq = { .mask = A6XX_INT_MASK, }; -static struct adreno_snapshot_sizes a6xx_snap_sizes = { - .cp_pfp = 0x33, -}; - -static struct adreno_snapshot_data a6xx_snapshot_data = { - .sect_sizes = &a6xx_snap_sizes, -}; - static struct adreno_coresight_register a6xx_coresight_regs[] = { { A6XX_DBGC_CFG_DBGBUS_SEL_A }, { A6XX_DBGC_CFG_DBGBUS_SEL_B }, @@ -2701,7 +2700,7 @@ static const struct { int (*check)(struct adreno_device *adreno_dev); void (*func)(struct adreno_device *adreno_dev); } a6xx_efuse_funcs[] = { - { adreno_is_a615, a6xx_efuse_speed_bin }, + { adreno_is_a615_family, a6xx_efuse_speed_bin }, { adreno_is_a608, a6xx_efuse_speed_bin }, }; @@ -2908,6 +2907,18 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = { .offset_0 = ADRENO_REG_REGISTER_MAX, }; +static void a6xx_perfcounter_init(struct adreno_device *adreno_dev) +{ + /* + * 
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A608. + * Mark them as broken so that they can't be used. + */ + if (adreno_is_a608(adreno_dev)) { + a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN; + a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN; + } +} + static int a6xx_perfcounter_update(struct adreno_device *adreno_dev, struct adreno_perfcount_register *reg, bool update_reg) { @@ -2976,7 +2987,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .start = a6xx_start, .snapshot = a6xx_snapshot, .irq = &a6xx_irq, - .snapshot_data = &a6xx_snapshot_data, .irq_trace = trace_kgsl_a5xx_irq_status, .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS, .platform_setup = a6xx_platform_setup, @@ -3007,6 +3017,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .preemption_context_destroy = a6xx_preemption_context_destroy, .sptprac_is_on = a6xx_sptprac_is_on, .ccu_invalidate = a6xx_ccu_invalidate, + .perfcounter_init = a6xx_perfcounter_init, .perfcounter_update = a6xx_perfcounter_update, .coresight = {&a6xx_coresight, &a6xx_coresight_cx}, }; diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index 42df65e0be1d198927127ad0ef2cf0848f01f2a7..6ac1594cb047408bcac5a89527ad20da024d75d6 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -48,10 +48,6 @@ static const unsigned int a6xx_gmu_registers[] = { 0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965, 0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC, 0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01, - /* GPU RSCC */ - 0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747, - 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897, - 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F, /* GMU AO */ 0x23B00, 0x23B16, 0x23C00, 0x23C00, /* GPU CC */ @@ -339,8 +335,6 @@ static int a6xx_gmu_hfi_start(struct kgsl_device *device) { struct gmu_device *gmu = 
KGSL_GMU_DEVICE(device); - gmu_core_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK, - HFI_IRQ_MSGQ_MASK, 0); gmu_core_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1); if (timed_poll_check(device, @@ -465,6 +459,25 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device) return 0; } +static int _load_legacy_gmu_fw(struct kgsl_device *device, + struct gmu_device *gmu) +{ + const struct firmware *fw = gmu->fw_image; + u32 *fwptr = (u32 *)fw->data; + int i; + + if (fw->size > MAX_GMUFW_SIZE) + return -EINVAL; + + for (i = 0; i < (fw->size >> 2); i++) + gmu_core_regwrite(device, + A6XX_GMU_CM3_ITCM_START + i, fwptr[i]); + + /* Proceed only after the FW is written */ + wmb(); + return 0; +} + static int load_gmu_fw(struct kgsl_device *device) { struct gmu_device *gmu = KGSL_GMU_DEVICE(device); @@ -474,6 +487,10 @@ static int load_gmu_fw(struct kgsl_device *device) struct gmu_block_header *blk; struct gmu_memdesc *md; + if (adreno_is_a630(ADRENO_DEVICE(device)) || + adreno_is_a615_family(ADRENO_DEVICE(device))) + return _load_legacy_gmu_fw(device, gmu); + while (fw < (uint8_t *)gmu->fw_image->data + gmu->fw_image->size) { blk = (struct gmu_block_header *)fw; fw += sizeof(*blk); @@ -535,7 +552,7 @@ static int a6xx_gmu_oob_set(struct adreno_device *adreno_dev, if (!gmu_core_isenabled(device)) return 0; - if (!adreno_is_a630(adreno_dev) && !adreno_is_a615(adreno_dev)) { + if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) { set = BIT(30 - req * 2); check = BIT(31 - req); @@ -588,7 +605,7 @@ static inline void a6xx_gmu_oob_clear(struct adreno_device *adreno_dev, if (!gmu_core_isenabled(device)) return; - if (!adreno_is_a630(adreno_dev) && !adreno_is_a615(adreno_dev)) { + if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev)) { clear = BIT(31 - req * 2); if (req >= 6) { dev_err(&gmu->pdev->dev, @@ -1462,6 +1479,37 @@ static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, } } +static int a6xx_gmu_wait_for_active_transition( + struct 
adreno_device *adreno_dev) +{ + unsigned int reg, num_retries; + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + + if (!gmu_core_isenabled(device)) + return 0; + + gmu_core_regread(device, + A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, ®); + + for (num_retries = 0; reg != GPU_HW_ACTIVE && num_retries < 100; + num_retries++) { + /* Wait for small time before trying again */ + udelay(5); + gmu_core_regread(device, + A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, ®); + } + + if (reg == GPU_HW_ACTIVE) + return 0; + + dev_err(&gmu->pdev->dev, + "GMU failed to move to ACTIVE state, Current state: 0x%x\n", + reg); + + return -ETIMEDOUT; +} + struct gmu_dev_ops adreno_a6xx_gmudev = { .load_firmware = a6xx_gmu_load_firmware, .oob_set = a6xx_gmu_oob_set, @@ -1477,6 +1525,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, .snapshot = a6xx_gmu_snapshot, + .wait_for_active_transition = a6xx_gmu_wait_for_active_transition, .gmu2host_intr_mask = HFI_IRQ_MASK, .gmu_ao_intr_mask = GMU_AO_INT_MASK, }; diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index 8daeb894f0936e83b6b9ed79567c56bcb43af779..c4368c096cf7e467f857e2d661b9f19a4d34e820 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -241,6 +241,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) unsigned int contextidr, cntl; unsigned long flags; struct adreno_preemption *preempt = &adreno_dev->preempt; + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); cntl = (((preempt->preempt_level << 6) & 0xC0) | ((preempt->skipsaverestore << 9) & 0x200) | @@ -360,6 +361,26 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) upper_32_bits(gpuaddr), FENCE_STATUS_WRITEDROPPED1_MASK); + /* + * Above fence writes will make sure GMU comes out of + * IFPC state if its was in IFPC state but it doesn't + * guarantee 
that GMU FW actually moved to ACTIVE state + * i.e. wake-up from IFPC is complete. + * Wait for GMU to move to ACTIVE state before triggering + * preemption. This is require to make sure CP doesn't + * interrupt GMU during wake-up from IFPC. + */ + if (GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_active_transition)) { + if (gmu_dev_ops->wait_for_active_transition(adreno_dev)) { + adreno_set_preempt_state(adreno_dev, + ADRENO_PREEMPT_NONE); + + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(device); + return; + } + } + adreno_dev->next_rb = next; /* Start the timer to detect a stuck preemption */ @@ -697,6 +718,11 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev) if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU) return -ENODEV; + if (adreno_is_a608(adreno_dev)) { + adreno_dev->preempt.preempt_level = 0; + adreno_dev->preempt.skipsaverestore = 0; + } + INIT_WORK(&preempt->work, _a6xx_preemption_worker); setup_timer(&preempt->timer, _a6xx_preemption_timer, diff --git a/drivers/gpu/msm/adreno_a6xx_rgmu.c b/drivers/gpu/msm/adreno_a6xx_rgmu.c new file mode 100644 index 0000000000000000000000000000000000000000..f20d8e8dea2a75a13fc4ed36867b51e466a9be6f --- /dev/null +++ b/drivers/gpu/msm/adreno_a6xx_rgmu.c @@ -0,0 +1,540 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include + +#include "kgsl_gmu_core.h" +#include "kgsl_rgmu.h" +#include "kgsl_trace.h" + +#include "adreno.h" +#include "a6xx_reg.h" +#include "adreno_a6xx.h" +#include "adreno_trace.h" +#include "adreno_snapshot.h" + +/* RGMU timeouts */ +#define RGMU_IDLE_TIMEOUT 100 /* ms */ +#define RGMU_START_TIMEOUT 100 /* ms */ +#define GPU_START_TIMEOUT 100 /* ms */ +#define GLM_SLEEP_TIMEOUT 10 /* ms */ + +static const unsigned int a6xx_rgmu_registers[] = { + /* GMU CX */ + 0x1F80F, 0x1F83D, 0x1F840, 0x1F8D8, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9CC, + /* GMU AO */ + 0x23B03, 0x23B16, 0x23B80, 0x23B82, + /* GPU CC */ + 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B, + 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440, + 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802, + 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02, + 0x26000, 0x26002, +}; + +irqreturn_t rgmu_irq_handler(int irq, void *data) +{ + struct kgsl_device *device = data; + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int status = 0; + + adreno_read_gmureg(adreno_dev, + ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status); + + if (status & RGMU_AO_IRQ_FENCE_ERR) { + unsigned int fence_status; + + adreno_read_gmureg(adreno_dev, + ADRENO_REG_GMU_AHB_FENCE_STATUS, &fence_status); + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status); + + dev_err_ratelimited(&rgmu->pdev->dev, + "FENCE error interrupt received %x\n", fence_status); + } + + if (status & ~RGMU_AO_IRQ_MASK) + dev_err_ratelimited(&rgmu->pdev->dev, + "Unhandled RGMU interrupts 0x%lx\n", + status & ~RGMU_AO_IRQ_MASK); + + return IRQ_HANDLED; +} + +irqreturn_t oob_irq_handler(int irq, void *data) +{ + struct kgsl_device *device = data; + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + struct adreno_device *adreno_dev = 
ADRENO_DEVICE(device); + unsigned int status = 0; + + adreno_read_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status); + + if (status & RGMU_OOB_IRQ_ERR_MSG) { + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status); + + dev_err_ratelimited(&rgmu->pdev->dev, + "RGMU oob irq error\n"); + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(device); + } + if (status & ~RGMU_OOB_IRQ_MASK) + dev_err_ratelimited(&rgmu->pdev->dev, + "Unhandled OOB interrupts 0x%lx\n", + status & ~RGMU_OOB_IRQ_MASK); + + return IRQ_HANDLED; +} + +/* + * a6xx_rgmu_oob_set() - Set OOB interrupt to RGMU + * @adreno_dev: Pointer to adreno device + * @req: Which of the OOB bits to request + */ +static int a6xx_rgmu_oob_set(struct adreno_device *adreno_dev, + enum oob_request req) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + int ret, set, check; + + /* Return silently for unsupported OOBs */ + if (!gmu_core_isenabled(device) || req > oob_gpu) + return 0; + + set = BIT(req + 16); + check = BIT(req + 16); + + gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set); + + ret = timed_poll_check(device, + A6XX_GMU_GMU2HOST_INTR_INFO, + check, + GPU_START_TIMEOUT, + check); + + if (ret) { + unsigned int status; + + gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &status); + dev_err(&rgmu->pdev->dev, + "Timed out while setting OOB req:%s status:0x%x\n", + gmu_core_oob_type_str(req), status); + return ret; + } + + gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, check); + trace_kgsl_gmu_oob_set(set); + return 0; +} + +/* + * a6xx_rgmu_oob_clear() - Clear a previously set OOB request. 
+ * @adreno_dev: Pointer to the adreno device that has the RGMU + * @req: Which of the OOB bits to clear + */ +static inline void a6xx_rgmu_oob_clear(struct adreno_device *adreno_dev, + enum oob_request req) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + /* Return silently for unsupported OOBs */ + if (!gmu_core_isenabled(device) || req > oob_gpu) + return; + + gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(req + 24)); + trace_kgsl_gmu_oob_clear(BIT(req + 24)); +} + +static void a6xx_rgmu_bcl_config(struct kgsl_device *device, bool on) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + if (on) { + /* Enable BCL CRC HW i/f */ + gmu_core_regwrite(device, + A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE, 0); + } else { + /* Disable CRC HW i/f */ + gmu_core_regwrite(device, + A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE, 1); + + /* Wait for HW CRC disable ACK */ + if (timed_poll_check(device, + A6XX_GMU_AO_RGMU_GLM_SLEEP_STATUS, + BIT(1), GLM_SLEEP_TIMEOUT, BIT(1))) + dev_err_ratelimited(&rgmu->pdev->dev, + "Timed out waiting for HW CRC disable acknowledgment\n"); + + /* Pull down the valid RGMU_GLM_SLEEP_CTRL[7] to 0 */ + gmu_core_regrmw(device, A6XX_GMU_AO_RGMU_GLM_SLEEP_CTRL, + BIT(7), 0); + + } +} + +static void a6xx_rgmu_irq_enable(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + /* Clear pending IRQs and Unmask needed IRQs */ + adreno_gmu_clear_and_unmask_irqs(ADRENO_DEVICE(device)); + + /* Enable all IRQs on host */ + enable_irq(rgmu->oob_interrupt_num); + enable_irq(rgmu->rgmu_interrupt_num); +} + +static void a6xx_rgmu_irq_disable(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + /* Disable all IRQs on host */ + disable_irq(rgmu->rgmu_interrupt_num); + disable_irq(rgmu->oob_interrupt_num); + + /* Mask all IRQs and clear pending IRQs */ + adreno_gmu_mask_and_clear_irqs(ADRENO_DEVICE(device)); +} + +static int a6xx_rgmu_ifpc_store(struct adreno_device *adreno_dev, 
+ unsigned int val) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + unsigned int requested_idle_level; + + if (!gmu_core_isenabled(device) || + !ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) + return -EINVAL; + + if (val) + requested_idle_level = GPU_HW_IFPC; + else + requested_idle_level = GPU_HW_ACTIVE; + + if (requested_idle_level == rgmu->idle_level) + return 0; + + mutex_lock(&device->mutex); + + /* Power down the GPU before changing the idle level */ + kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND); + rgmu->idle_level = requested_idle_level; + kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER); + + mutex_unlock(&device->mutex); + + return 0; +} + +static unsigned int a6xx_rgmu_ifpc_show(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + return gmu_core_isenabled(device) && rgmu->idle_level == GPU_HW_IFPC; +} + + +static void a6xx_rgmu_prepare_stop(struct adreno_device *adreno_dev) +{ + /* Turn off GX_MEM retention */ + kgsl_regwrite(KGSL_DEVICE(adreno_dev), + A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0); +} + +#define GX_GDSC_POWER_OFF BIT(6) +/* + * a6xx_rgmu_gx_is_on() - Check if GX is on using pwr status register + * @adreno_dev - Pointer to adreno_device + * This check should only be performed if the keepalive bit is set or it + * can be guaranteed that the power state of the GPU will remain unchanged + */ +static bool a6xx_rgmu_gx_is_on(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int val; + + gmu_core_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val); + return !(val & GX_GDSC_POWER_OFF); +} + +static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + unsigned int reg; + unsigned long t; + + if 
(!gmu_core_isenabled(device) || + rgmu->idle_level != GPU_HW_IFPC) + return 0; + + t = jiffies + msecs_to_jiffies(RGMU_IDLE_TIMEOUT); + do { + gmu_core_regread(device, + A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, ®); + + if (reg & GX_GDSC_POWER_OFF) + return 0; + + /* Wait 10us to reduce unnecessary AHB bus traffic */ + usleep_range(10, 100); + } while (!time_after(jiffies, t)); + + dev_err(&rgmu->pdev->dev, "Timeout waiting for lowest idle:%x\n", reg); + return -ETIMEDOUT; +} + +/* + * The lowest 16 bits of this value are the number of XO clock cycles + * for main hysteresis. This is the first hysteresis. Here we set it + * to 0x1680 cycles, or 300 us. The highest 16 bits of this value are + * the number of XO clock cycles for short hysteresis. This happens + * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us. + */ +#define RGMU_PWR_COL_HYST 0x000A1680 + +/* HOSTTOGMU and TIMER0/1 interrupt mask: 0x20060 */ +#define RGMU_INTR_EN_MASK (BIT(5) | BIT(6) | BIT(17)) + +/* RGMU FENCE RANGE MASK */ +#define RGMU_FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0)) + +/* + * a6xx_rgmu_fw_start() - set up GMU and start FW + * @device: Pointer to KGSL device + * @boot_state: State of the rgmu being started + */ +static int a6xx_rgmu_fw_start(struct kgsl_device *device, + unsigned int boot_state) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + unsigned int status; + int i; + + switch (boot_state) { + case GMU_COLD_BOOT: + case GMU_WARM_BOOT: + /* Turn on TCM retention */ + gmu_core_regwrite(device, A6XX_GMU_GENERAL_7, 1); + + /* Load RGMU FW image via AHB bus */ + for (i = 0; i < rgmu->fw_size; i++) + gmu_core_regwrite(device, A6XX_GMU_CM3_ITCM_START + i, + rgmu->fw_hostptr[i]); + /* + * Enable power counter because it was disabled before + * slumber. 
+ */ + gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, + 1); + break; + } + + /* IFPC Feature Enable */ + if (rgmu->idle_level == GPU_HW_IFPC) { + gmu_core_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST, + RGMU_PWR_COL_HYST); + gmu_core_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, + BIT(0)); + } + + /* For RGMU CX interrupt */ + gmu_core_regwrite(device, A6XX_RGMU_CX_INTR_GEN_EN, RGMU_INTR_EN_MASK); + + /* Enable GMU AO to host interrupt */ + gmu_core_regwrite(device, A6XX_GMU_AO_INTERRUPT_EN, RGMU_AO_IRQ_MASK); + + /* For OOB */ + gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_EN_2, 0x00FF0000); + gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_EN_3, 0xFF000000); + + /* Fence Address range configuration */ + gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0, + RGMU_FENCE_RANGE_MASK); + + /* During IFPC RGMU will put fence in drop mode so we would + * need to put fence allow mode during slumber out sequence. + */ + gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + + /* BCL ON Sequence */ + a6xx_rgmu_bcl_config(device, true); + + /* Write 0 first to make sure that rgmu is reset */ + gmu_core_regwrite(device, A6XX_RGMU_CX_PCC_CTRL, 0); + + /* Make sure putting in reset doesn't happen after writing 1 */ + wmb(); + + /* Bring rgmu out of reset */ + gmu_core_regwrite(device, A6XX_RGMU_CX_PCC_CTRL, 1); + + if (timed_poll_check(device, A6XX_RGMU_CX_PCC_INIT_RESULT, + BIT(0), RGMU_START_TIMEOUT, BIT(0))) { + gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &status); + dev_err(&rgmu->pdev->dev, + "rgmu boot Failed. 
status:%08x\n", status); + return -ETIMEDOUT; + } + + return 0; +} + +static int a6xx_rgmu_suspend(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int ret = 0; + + /* Check GX GDSC status */ + if (a6xx_rgmu_gx_is_on(adreno_dev)) { + + /* Switch gx gdsc control from RGMU to CPU + * force non-zero reference count in clk driver + * so next disable call will turn + * off the GDSC + */ + ret = regulator_enable(rgmu->gx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Fail to enable gx gdsc, error:%d\n", ret); + + ret = regulator_disable(rgmu->gx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Fail to disable gx gdsc, error:%d\n", ret); + + if (a6xx_rgmu_gx_is_on(adreno_dev)) + dev_err(&rgmu->pdev->dev, "gx is stuck on\n"); + } + + return ret; +} + +/* + * a6xx_rgmu_gpu_pwrctrl() - GPU power control via rgmu interface + * @adreno_dev: Pointer to adreno device + * @mode: requested power mode + * @arg1: first argument for mode control + * @arg2: second argument for mode control + */ +static int a6xx_rgmu_gpu_pwrctrl(struct adreno_device *adreno_dev, + unsigned int mode, unsigned int arg1, unsigned int arg2) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + int ret = 0; + + if (!gmu_core_isenabled(device)) + return 0; + + switch (mode) { + case GMU_FW_START: + ret = a6xx_rgmu_fw_start(device, arg1); + break; + case GMU_SUSPEND: + ret = a6xx_rgmu_suspend(device); + break; + case GMU_NOTIFY_SLUMBER: + + /* Disable the power counter so that the RGMU is not busy */ + gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, + 0); + + /* BCL OFF Sequence */ + a6xx_rgmu_bcl_config(device, false); + + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * a6xx_rgmu_load_firmware() - Load the ucode into the RGMU TCM + * @device: Pointer to KGSL device + */ +static int a6xx_rgmu_load_firmware(struct kgsl_device *device) +{ + const struct 
firmware *fw = NULL; + const struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + const struct adreno_gpu_core *gpucore = adreno_dev->gpucore; + int ret; + + if (!gmu_core_isenabled(device)) + return 0; + + /* RGMU fw already saved and verified so do nothing new */ + if (rgmu->fw_hostptr) + return 0; + + ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev); + if (ret < 0) { + KGSL_CORE_ERR("request_firmware (%s) failed: %d\n", + gpucore->gpmufw_name, ret); + return ret; + } + + rgmu->fw_hostptr = devm_kmemdup(&rgmu->pdev->dev, fw->data, + fw->size, GFP_KERNEL); + + if (rgmu->fw_hostptr) + rgmu->fw_size = (fw->size / sizeof(u32)); + + release_firmware(fw); + return rgmu->fw_hostptr ? 0 : -ENOMEM; +} + +/* + * a6xx_rgmu_snapshot() - A6XX RGMU snapshot function + * @adreno_dev: Device being snapshotted + * @snapshot: Pointer to the snapshot instance + * + * This is where all of the A6XX RGMU specific bits and pieces are grabbed + * into the snapshot memory + */ +static void a6xx_rgmu_snapshot(struct adreno_device *adreno_dev, + struct kgsl_snapshot *snapshot) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + adreno_snapshot_registers(device, snapshot, a6xx_rgmu_registers, + ARRAY_SIZE(a6xx_rgmu_registers) / 2); +} + +struct gmu_dev_ops adreno_a6xx_rgmudev = { + .load_firmware = a6xx_rgmu_load_firmware, + .oob_set = a6xx_rgmu_oob_set, + .oob_clear = a6xx_rgmu_oob_clear, + .irq_enable = a6xx_rgmu_irq_enable, + .irq_disable = a6xx_rgmu_irq_disable, + .rpmh_gpu_pwrctrl = a6xx_rgmu_gpu_pwrctrl, + .gx_is_on = a6xx_rgmu_gx_is_on, + .prepare_stop = a6xx_rgmu_prepare_stop, + .wait_for_lowest_idle = a6xx_rgmu_wait_for_lowest_idle, + .ifpc_store = a6xx_rgmu_ifpc_store, + .ifpc_show = a6xx_rgmu_ifpc_show, + .snapshot = a6xx_rgmu_snapshot, + .gmu2host_intr_mask = RGMU_OOB_IRQ_MASK, + .gmu_ao_intr_mask = RGMU_AO_IRQ_MASK, +}; diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c 
b/drivers/gpu/msm/adreno_a6xx_snapshot.c index e04ec92bf52e3bc8f58b677de9da68ea00fee39b..748e2e806ec1eab10252d811648f0d4a87825d4e 100644 --- a/drivers/gpu/msm/adreno_a6xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c @@ -56,6 +56,24 @@ static const unsigned int a6xx_pc_vs_cluster[] = { 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07, }; +static const unsigned int a630_rscc_snapshot_registers[] = { + 0x23400, 0x23434, 0x23436, 0x23436, 0x23480, 0x23484, 0x23489, 0x2348C, + 0x23491, 0x23494, 0x23499, 0x2349C, 0x234A1, 0x234A4, 0x234A9, 0x234AC, + 0x23500, 0x23502, 0x23504, 0x23507, 0x23514, 0x23519, 0x23524, 0x2352B, + 0x23580, 0x23597, 0x23740, 0x23741, 0x23744, 0x23747, 0x2374C, 0x23787, + 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, + 0x2393C, 0x2393F, 0x23944, 0x2397F, +}; + +static const unsigned int a6xx_rscc_snapshot_registers[] = { + 0x23400, 0x23434, 0x23436, 0x23436, 0x23440, 0x23440, 0x23480, 0x23484, + 0x23489, 0x2348C, 0x23491, 0x23494, 0x23499, 0x2349C, 0x234A1, 0x234A4, + 0x234A9, 0x234AC, 0x23500, 0x23502, 0x23504, 0x23507, 0x23514, 0x23519, + 0x23524, 0x2352B, 0x23580, 0x23597, 0x23740, 0x23741, 0x23744, 0x23747, + 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897, + 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F, +}; + static const struct sel_reg { unsigned int host_reg; unsigned int cd_reg; @@ -1131,7 +1149,7 @@ static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device, block_id = block->block_id; /* GMU_GX data is read using the GMU_CX block id on A630 */ - if ((adreno_is_a630(adreno_dev) || adreno_is_a615(adreno_dev)) && + if ((adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) && (block_id == A6XX_DBGBUS_GMU_GX)) block_id = A6XX_DBGBUS_GMU_CX; @@ -1493,7 +1511,6 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct 
gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); - struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; bool sptprac_on, gx_on = true; unsigned int i, roq_size; @@ -1542,10 +1559,18 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]); } + if (adreno_is_a615_family(adreno_dev) || adreno_is_a630(adreno_dev)) + adreno_snapshot_registers(device, snapshot, + a630_rscc_snapshot_registers, + ARRAY_SIZE(a630_rscc_snapshot_registers) / 2); + else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) + adreno_snapshot_registers(device, snapshot, + a6xx_rscc_snapshot_registers, + ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2); + /* CP_SQE indexed registers */ kgsl_snapshot_indexed_registers(device, snapshot, - A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, - 0, snap_data->sect_sizes->cp_pfp); + A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, 0, 0x33); /* CP_DRAW_STATE */ kgsl_snapshot_indexed_registers(device, snapshot, diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 419f313acaa8f55c36d5f0f23f4e4d9c1ea6a04a..25f913102b2d69459c84fbda78cbbea8fdf41adb 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -26,21 +26,6 @@ #define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) -/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */ -unsigned int adreno_dispatch_starvation_time = 2000; - -/* Amount of time in ms that a starved RB is permitted to execute for */ -unsigned int adreno_dispatch_time_slice = 25; - -/* - * If set then dispatcher tries to schedule lower priority RB's after if they - * have commands in their pipe and have been inactive for - * _dispatch_starvation_time. 
Also, once an RB is schduled it will be allowed - * to run for _dispatch_time_slice unless it's commands complete before - * _dispatch_time_slice - */ -unsigned int adreno_disp_preempt_fair_sched; - /* Number of commands that can be queued in a context before it sleeps */ static unsigned int _context_drawqueue_size = 50; @@ -1716,8 +1701,9 @@ static void adreno_fault_header(struct kgsl_device *device, ib2base, ib2sz, drawctxt->rb->id); pr_fault(device, drawobj, - "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", - drawobj->context->id, drawobj->timestamp, status, + "gpu fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", + drawobj->context->id, get_api_type_str(drawctxt->type), + drawobj->timestamp, status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz); if (rb != NULL) @@ -2180,12 +2166,6 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) adreno_dev->cur_rb = hung_rb; } } - if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED == - rb->starve_timer_state) { - adreno_put_gpu_halt(adreno_dev); - rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT; - } } if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) { @@ -2762,12 +2742,6 @@ static DISPATCHER_UINT_ATTR(fault_throttle_time, 0644, 0, _fault_throttle_time); static DISPATCHER_UINT_ATTR(fault_throttle_burst, 0644, 0, _fault_throttle_burst); -static DISPATCHER_UINT_ATTR(disp_preempt_fair_sched, 0644, 0, - adreno_disp_preempt_fair_sched); -static DISPATCHER_UINT_ATTR(dispatch_time_slice, 0644, 0, - adreno_dispatch_time_slice); -static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0, - adreno_dispatch_starvation_time); static struct attribute *dispatcher_attrs[] = { &dispatcher_attr_inflight.attr, @@ -2779,9 +2753,6 @@ static struct attribute *dispatcher_attrs[] = { &dispatcher_attr_fault_detect_interval.attr, &dispatcher_attr_fault_throttle_time.attr, 
&dispatcher_attr_fault_throttle_burst.attr, - &dispatcher_attr_disp_preempt_fair_sched.attr, - &dispatcher_attr_dispatch_time_slice.attr, - &dispatcher_attr_dispatch_starvation_time.attr, NULL, }; diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index 61bd06f4d37326cb2756207b411e8afe941dcbaf..3cceeec54983702e4dadbb83f1cacea5016c8f9a 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -14,29 +14,7 @@ #ifndef ____ADRENO_DISPATCHER_H #define ____ADRENO_DISPATCHER_H -extern unsigned int adreno_disp_preempt_fair_sched; extern unsigned int adreno_drawobj_timeout; -extern unsigned int adreno_dispatch_starvation_time; -extern unsigned int adreno_dispatch_time_slice; - -/** - * enum adreno_dispatcher_starve_timer_states - Starvation control states of - * a RB - * @ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT: Uninitialized, starvation control - * is not operating - * @ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT: Starvation timer is initialized - * and counting - * @ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED: The starvation timer has elapsed - * this state indicates that the RB is starved - * @ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED: RB is scheduled on the device - * and will remain scheduled for a minimum time slice when in this state. 
- */ -enum adreno_dispatcher_starve_timer_states { - ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT = 0, - ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT = 1, - ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED = 2, - ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED = 3, -}; /* * Maximum size of the dispatcher ringbuffer - the actual inflight size will be @@ -78,9 +56,6 @@ struct adreno_dispatcher_drawqueue { * @work: work_struct to put the dispatcher in a work queue * @kobj: kobject for the dispatcher directory in the device sysfs node * @idle_gate: Gate to wait on for dispatcher to idle - * @disp_preempt_fair_sched: If set then dispatcher will try to be fair to - * starving RB's by scheduling them in and enforcing a minimum time slice - * for every RB that is scheduled to run on the device */ struct adreno_dispatcher { struct mutex mutex; @@ -94,7 +69,6 @@ struct adreno_dispatcher { struct kthread_work work; struct kobject kobj; struct completion idle_gate; - unsigned int disp_preempt_fair_sched; }; enum adreno_dispatcher_flags { diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index eef506ff46f047647954de6cfb719301879985fd..466d1e2dd5d732066eaf58ab7836162b58751db8 100644 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -139,4 +139,16 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, void adreno_drawctxt_dump(struct kgsl_device *device, struct kgsl_context *context); +static struct adreno_context_type ctxt_type_table[] = {KGSL_CONTEXT_TYPES}; + +static inline const char *get_api_type_str(unsigned int type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) { + if (ctxt_type_table[i].type == type) + return ctxt_type_table[i].str; + } + return "UNKNOWN"; +} #endif /* __ADRENO_DRAWCTXT_H */ diff --git a/drivers/gpu/msm/adreno_llc.h b/drivers/gpu/msm/adreno_llc.h index 90dff82f3059ce202a161fa3a5f4a91403b7d4c9..2fa2202933c9a70b5e39db0056a77cb7e7aba22b 100644 --- a/drivers/gpu/msm/adreno_llc.h +++ b/drivers/gpu/msm/adreno_llc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,18 +29,21 @@ static inline void *adreno_llc_getd(struct device *dev, const char *name) static inline void adreno_llc_putd(void *desc) { - llcc_slice_putd(desc); + if (!IS_ERR(desc)) + llcc_slice_putd(desc); } static inline int adreno_llc_deactivate_slice(void *desc) { - return llcc_slice_deactivate(desc); + if (IS_ERR(desc)) + return PTR_ERR(desc); + else + return llcc_slice_deactivate(desc); } static inline int adreno_llc_get_scid(void *desc) { return llcc_get_slice_id(desc); - } static inline void adreno_llc_setup(struct kgsl_device *device) @@ -48,20 +51,24 @@ static inline void adreno_llc_setup(struct kgsl_device *device) struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); - if (adreno_dev->gpu_llc_slice && adreno_dev->gpu_llc_slice_enable) + if (!IS_ERR(adreno_dev->gpu_llc_slice) && + adreno_dev->gpu_llc_slice_enable) if (!llcc_slice_activate(adreno_dev->gpu_llc_slice)) { if (gpudev->llc_configure_gpu_scid) gpudev->llc_configure_gpu_scid(adreno_dev); } - if (adreno_dev->gpuhtw_llc_slice && adreno_dev->gpuhtw_llc_slice_enable) + if (!IS_ERR(adreno_dev->gpuhtw_llc_slice) && + adreno_dev->gpuhtw_llc_slice_enable) if (!llcc_slice_activate(adreno_dev->gpuhtw_llc_slice)) { if (gpudev->llc_configure_gpuhtw_scid) gpudev->llc_configure_gpuhtw_scid(adreno_dev); } - if (gpudev->llc_enable_overrides) - gpudev->llc_enable_overrides(adreno_dev); + if (adreno_dev->gpu_llc_slice_enable || + adreno_dev->gpuhtw_llc_slice_enable) + if (gpudev->llc_enable_overrides) + gpudev->llc_enable_overrides(adreno_dev); } #else diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c index 79f16e53941c6675236388d52101c88f7643491b..96c37aacf7ddca8cfbb8bfc16aa5a2f9bb50ad78 100644 --- a/drivers/gpu/msm/adreno_profile.c +++ b/drivers/gpu/msm/adreno_profile.c 
@@ -78,19 +78,6 @@ #define SIZE_PIPE_ENTRY(cnt) (50 + (cnt) * 62) #define SIZE_LOG_ENTRY(cnt) (6 + (cnt) * 5) -static struct adreno_context_type ctxt_type_table[] = {KGSL_CONTEXT_TYPES}; - -static const char *get_api_type_str(unsigned int type) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) { - if (ctxt_type_table[i].type == type) - return ctxt_type_table[i].str; - } - return "UNKNOWN"; -} - static inline uint _ib_start(struct adreno_device *adreno_dev, unsigned int *cmds) { diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index f105b9b9d33f8f34b97dea4332d888d5988ba817..08a4d66c978493145b2131054cbcaff686f90359 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -253,8 +253,6 @@ int adreno_ringbuffer_start(struct adreno_device *adreno_dev, rb->wptr = 0; rb->_wptr = 0; rb->wptr_preempt_end = 0xFFFFFFFF; - rb->starve_timer_state = - ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT; } /* start is specific GPU rb */ diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index a4dc901cf0b6e83ef60e1a67abf0a1bff479f937..5efe72626dfcc66035b659768e62ed73644170c3 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -107,9 +107,6 @@ struct adreno_ringbuffer_pagetable_info { * at the right rptr * @gpr11: The gpr11 value of this RB * @preempted_midway: Indicates that the RB was preempted before rptr = wptr - * @sched_timer: Timer that tracks how long RB has been waiting to be scheduled - * or how long it has been scheduled for after preempting in - * @starve_timer_state: Indicates the state of the wait. 
* @preempt_lock: Lock to protect the wptr pointer while it is being updated */ struct adreno_ringbuffer { @@ -132,8 +129,6 @@ struct adreno_ringbuffer { unsigned int wptr_preempt_end; unsigned int gpr11; int preempted_midway; - unsigned long sched_timer; - enum adreno_dispatcher_starve_timer_states starve_timer_state; spinlock_t preempt_lock; }; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 56930dcc8f83a0030cebbc32d2799230a2866718..5e9d164944e07f75f78019a4d15243e000a9401a 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1047,24 +1047,6 @@ static void process_release_memory(struct kgsl_process_private *private) } } -static void process_release_sync_sources(struct kgsl_process_private *private) -{ - struct kgsl_syncsource *syncsource; - int next = 0; - - while (1) { - spin_lock(&private->syncsource_lock); - syncsource = idr_get_next(&private->syncsource_idr, &next); - spin_unlock(&private->syncsource_lock); - - if (syncsource == NULL) - break; - - kgsl_syncsource_cleanup(private, syncsource); - next = next + 1; - } -} - static void kgsl_process_private_close(struct kgsl_device_private *dev_priv, struct kgsl_process_private *private) { @@ -1083,7 +1065,8 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv, kgsl_process_uninit_sysfs(private); - process_release_sync_sources(private); + /* Release all syncsource objects from process private */ + kgsl_syncsource_process_release_syncsources(private); /* When using global pagetables, do not detach global pagetable */ if (private->pagetable->name != KGSL_MMU_GLOBAL_PT) @@ -2672,7 +2655,13 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device, return -ENOMEM; attach = dma_buf_attach(dmabuf, device->dev); - attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + /* + * If dma buffer is marked IO coherent, skip sync at attach, + * which involves flushing the buffer on CPU. + * HW manages coherency for IO coherent buffers. 
+ */ + if (entry->memdesc.flags & KGSL_MEMFLAGS_IOCOHERENT) + attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; if (IS_ERR_OR_NULL(attach)) { ret = attach ? PTR_ERR(attach) : -EINVAL; diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index f81328deaa2c19c37ab7c86ffca005a6f2e84af1..80d4027d344825432e3ab36dd313ea4b729ddb99 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -97,7 +97,8 @@ enum kgsl_event_results { { KGSL_CONTEXT_TYPE_GL, "GL" }, \ { KGSL_CONTEXT_TYPE_CL, "CL" }, \ { KGSL_CONTEXT_TYPE_C2D, "C2D" }, \ - { KGSL_CONTEXT_TYPE_RS, "RS" } + { KGSL_CONTEXT_TYPE_RS, "RS" }, \ + { KGSL_CONTEXT_TYPE_VK, "VK" } #define KGSL_CONTEXT_ID(_context) \ ((_context != NULL) ? (_context)->id : KGSL_MEMSTORE_GLOBAL) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 37b12a51ad0a5a252e60bdac7fcb01bc6def0cc7..dd798dc0c3b334884fde7d231fbca5e3d89206b0 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -459,7 +459,7 @@ static int gmu_memory_probe(struct kgsl_device *device, } /* Allocates & maps GMU crash dump memory */ - if (adreno_is_a630(adreno_dev)) { + if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) { gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, DUMPMEM_SIZE, (IOMMU_READ | IOMMU_WRITE)); if (IS_ERR(gmu->dump_mem)) { @@ -1308,9 +1308,6 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node) disable_irq(hfi->hfi_interrupt_num); tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu); - INIT_LIST_HEAD(&hfi->msglist); - spin_lock_init(&hfi->msglock); - spin_lock_init(&hfi->read_queue_lock); hfi->kgsldev = device; /* Retrieves GMU/GPU power level configurations*/ diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h index 8619b9327165aa58b287697d6e78be1f1ef35020..aed3014d00537f7528a4fd828f8a487e2c52454f 100644 --- a/drivers/gpu/msm/kgsl_gmu.h +++ b/drivers/gpu/msm/kgsl_gmu.h @@ -18,7 +18,7 @@ 
#include #include "kgsl_hfi.h" -#define MAX_GMUFW_SIZE 0x2000 /* in bytes */ +#define MAX_GMUFW_SIZE 0x8000 /* in bytes */ #define BWMEM_SIZE (12 + (4 * NUM_BW_LEVELS)) /*in bytes*/ diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 9421de861cb79124f645d5c5c1175f099e536e13..b61a0bf50891acf586ad33a246a11e322f286894 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -28,10 +28,34 @@ MODULE_PARM_DESC(nogmu, "Disable the GMU"); static const struct { char *compat; struct gmu_core_ops *core_ops; + enum gmu_coretype type; } gmu_subtypes[] = { - {"qcom,gpu-gmu", &gmu_ops}, + {"qcom,gpu-gmu", &gmu_ops, GMU_CORE_TYPE_CM3}, + {"qcom,gpu-rgmu", &rgmu_ops, GMU_CORE_TYPE_PCC}, }; +struct oob_entry { + enum oob_request req; + const char *str; +}; + +const char *gmu_core_oob_type_str(enum oob_request req) +{ + int i; + struct oob_entry table[] = { + { oob_gpu, "oob_gpu"}, + { oob_perfcntr, "oob_perfcntr"}, + { oob_preempt, "oob_preempt"}, + { oob_boot_slumber, "oob_boot_slumber"}, + { oob_dcvs, "oob_dcvs"}, + }; + + for (i = 0; i < ARRAY_SIZE(table); i++) + if (req == table[i].req) + return table[i].str; + return "UNKNOWN"; +} + int gmu_core_probe(struct kgsl_device *device) { struct device_node *node; @@ -45,8 +69,11 @@ int gmu_core_probe(struct kgsl_device *device) node = of_find_compatible_node(device->pdev->dev.of_node, NULL, gmu_subtypes[i].compat); - if (node != NULL) + if (node != NULL) { gmu_core_ops = gmu_subtypes[i].core_ops; + device->gmu_core.type = gmu_subtypes[i].type; + break; + } } /* No GMU in dt, no worries...hopefully */ @@ -85,6 +112,16 @@ bool gmu_core_gpmu_isenabled(struct kgsl_device *device) return test_bit(GMU_GPMU, &device->gmu_core.flags); } +bool gmu_core_scales_bandwidth(struct kgsl_device *device) +{ + if (device->gmu_core.type == GMU_CORE_TYPE_PCC) + return false; + else + return gmu_core_gpmu_isenabled(device) && + (adreno_is_a640(ADRENO_DEVICE(device)) || + 
adreno_is_a680(ADRENO_DEVICE(device))); +} + int gmu_core_start(struct kgsl_device *device) { struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index b2ecf2b1632ddb44bd1400ac40965eccbb1db3d7..f9d5d3f45fcd0af9d8c5d08fb1bfeadb7b0e07ee 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -59,6 +59,13 @@ enum gmu_core_flags { GMU_RSCC_SLEEP_SEQ_DONE, }; +/* GMU Types */ +enum gmu_coretype { + GMU_CORE_TYPE_CM3 = 1, /* Cortex M3 core */ + GMU_CORE_TYPE_PCC = 2, /* Power collapsible controller */ + GMU_CORE_TYPE_NONE, /* No GMU */ +}; + /* * OOB requests values. These range from 0 to 7 and then * the BIT() offset into the actual value is calculated @@ -136,6 +143,7 @@ struct gmu_dev_ops { enum oob_request req); void (*oob_clear)(struct adreno_device *adreno_dev, enum oob_request req); + void (*bcl_config)(struct adreno_device *adreno_dev, bool on); void (*irq_enable)(struct kgsl_device *device); void (*irq_disable)(struct kgsl_device *device); int (*hfi_start_msg)(struct adreno_device *adreno_dev); @@ -145,10 +153,12 @@ struct gmu_dev_ops { int (*wait_for_lowest_idle)(struct adreno_device *); int (*wait_for_gmu_idle)(struct adreno_device *); bool (*gx_is_on)(struct adreno_device *); + void (*prepare_stop)(struct adreno_device *); int (*ifpc_store)(struct adreno_device *adreno_dev, unsigned int val); unsigned int (*ifpc_show)(struct adreno_device *adreno_dev); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); + int (*wait_for_active_transition)(struct adreno_device *adreno_dev); const unsigned int gmu2host_intr_mask; const unsigned int gmu_ao_intr_mask; }; @@ -173,11 +183,13 @@ struct gmu_core_device { struct gmu_core_ops *core_ops; struct gmu_dev_ops *dev_ops; unsigned long flags; + enum gmu_coretype type; }; -/* GMU core functions */ extern struct gmu_core_ops gmu_ops; +extern struct gmu_core_ops rgmu_ops; +/* GMU core functions */ int 
gmu_core_probe(struct kgsl_device *device); void gmu_core_remove(struct kgsl_device *device); int gmu_core_start(struct kgsl_device *device); @@ -185,6 +197,7 @@ void gmu_core_stop(struct kgsl_device *device); int gmu_core_suspend(struct kgsl_device *device); void gmu_core_snapshot(struct kgsl_device *device); bool gmu_core_gpmu_isenabled(struct kgsl_device *device); +bool gmu_core_scales_bandwidth(struct kgsl_device *device); bool gmu_core_isenabled(struct kgsl_device *device); int gmu_core_dcvs_set(struct kgsl_device *device, unsigned int gpu_pwrlevel, unsigned int bus_level); @@ -198,4 +211,5 @@ void gmu_core_regwrite(struct kgsl_device *device, unsigned int offsetwords, unsigned int value); void gmu_core_regrmw(struct kgsl_device *device, unsigned int offsetwords, unsigned int mask, unsigned int bits); +const char *gmu_core_oob_type_str(enum oob_request req); #endif /* __KGSL_GMU_CORE_H */ diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index 6a469aa6cfdeb1b9577a272f8566ffa73031c1ba..476eabee2b727153277753734aa8e5bab7b9a2a5 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -12,13 +12,15 @@ */ #include "kgsl_device.h" +#include "kgsl_hfi.h" #include "kgsl_gmu.h" #include "adreno.h" #include "kgsl_trace.h" +#include "kgsl_pwrctrl.h" #define HFI_QUEUE_OFFSET(i) \ - ((sizeof(struct hfi_queue_table)) + \ - ((i) * HFI_QUEUE_SIZE)) + (ALIGN(sizeof(struct hfi_queue_table), SZ_16) + \ + ((i) * HFI_QUEUE_SIZE)) #define HOST_QUEUE_START_ADDR(hfi_mem, i) \ ((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i)) @@ -45,13 +47,13 @@ (((minor) & 0x7FFFFF) << 5) | \ ((branch) & 0x1F)) -static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx); +static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx, + struct pending_cmd *ret_cmd); /* Size in below functions are in unit of dwords */ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, unsigned int *output, unsigned int max_size) { - struct 
kgsl_hfi *hfi = &gmu->hfi; struct gmu_memdesc *mem_addr = gmu->hfi_mem; struct hfi_queue_table *tbl = mem_addr->hostptr; struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx]; @@ -64,8 +66,6 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, if (hdr->status == HFI_QUEUE_STATUS_DISABLED) return -EINVAL; - spin_lock_bh(&hfi->read_queue_lock); - if (hdr->read_index == hdr->write_index) { hdr->rx_req = 1; result = -ENODATA; @@ -109,7 +109,6 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, hdr->read_index = read; done: - spin_unlock_bh(&hfi->read_queue_lock); return result; } @@ -194,6 +193,7 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx, void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, uint32_t queue_sz_bytes) { + struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev); int i; struct hfi_queue_table *tbl; struct hfi_queue_header *hdr; @@ -208,6 +208,17 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, { HFI_DSP_IDX_0, HFI_DSP_PRI_0, HFI_QUEUE_STATUS_DISABLED }, }; + /* + * Overwrite the queue IDs for A630, A615 and A616 as they use + * legacy firmware. Legacy firmware has different queue IDs for + * message, debug and dispatch queues. 
+ */ + if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) { + queue[HFI_MSG_ID].idx = HFI_MSG_IDX_LEGACY; + queue[HFI_DBG_ID].idx = HFI_DBG_IDX_LEGACY; + queue[HFI_DSP_ID_0].idx = HFI_DSP_IDX_0_LEGACY; + } + /* Fill Table Header */ tbl = mem_addr->hostptr; tbl->qtbl_hdr.version = 0; @@ -240,39 +251,27 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, #define HDR_CMP_SEQNUM(out_hdr, in_hdr) \ (MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr)) -static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd) +static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd, + struct pending_cmd *ret_cmd) { uint32_t *ack = rcvd; uint32_t hdr = ack[0]; uint32_t req_hdr = ack[1]; struct kgsl_hfi *hfi = &gmu->hfi; - struct pending_cmd *cmd = NULL; - uint32_t waiters[64], i = 0, j; trace_kgsl_hfi_receive(MSG_HDR_GET_ID(req_hdr), MSG_HDR_GET_SIZE(req_hdr), MSG_HDR_GET_SEQNUM(req_hdr)); - spin_lock_bh(&hfi->msglock); - list_for_each_entry(cmd, &hfi->msglist, node) { - if (HDR_CMP_SEQNUM(cmd->sent_hdr, req_hdr)) { - memcpy(&cmd->results, ack, MSG_HDR_GET_SIZE(hdr) << 2); - complete(&cmd->msg_complete); - spin_unlock_bh(&hfi->msglock); - return; - } - if (i < 64) - waiters[i++] = cmd->sent_hdr; + if (HDR_CMP_SEQNUM(ret_cmd->sent_hdr, req_hdr)) { + memcpy(&ret_cmd->results, ack, MSG_HDR_GET_SIZE(hdr) << 2); + return; } - spin_unlock_bh(&hfi->msglock); + /* Didn't find the sender, list the waiter */ dev_err_ratelimited(&gmu->pdev->dev, - "HFI ACK: Cannot find sender for 0x%8.8X\n", req_hdr); - /* Didn't find the sender, list all the waiters */ - for (j = 0; j < i && j < 64; j++) { - dev_err_ratelimited(&gmu->pdev->dev, - "HFI ACK: Waiters: 0x%8.8X\n", waiters[j]); - } + "HFI ACK: Cannot find sender for 0x%8.8x Waiter: 0x%8.8x\n", + req_hdr, ret_cmd->sent_hdr); adreno_set_gpu_fault(ADRENO_DEVICE(hfi->kgsldev), ADRENO_GMU_FAULT); adreno_dispatcher_schedule(hfi->kgsldev); @@ -281,6 +280,28 @@ static void receive_ack_cmd(struct gmu_device 
*gmu, void *rcvd) #define MSG_HDR_SET_SEQNUM(hdr, num) \ (((hdr) & 0xFFFFF) | ((num) << 20)) +static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev, + enum adreno_regs offset_name, unsigned int expected_val, + unsigned int mask, unsigned int timeout_ms) +{ + unsigned int val; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + while (time_is_after_jiffies(timeout)) { + adreno_read_gmureg(adreno_dev, offset_name, &val); + if ((val & mask) == expected_val) + return 0; + usleep_range(10, 100); + } + + /* Check one last time */ + adreno_read_gmureg(adreno_dev, offset_name, &val); + if ((val & mask) == expected_val) + return 0; + + return -ETIMEDOUT; +} + static int hfi_send_cmd(struct gmu_device *gmu, uint32_t queue_idx, void *data, struct pending_cmd *ret_cmd) { @@ -288,42 +309,34 @@ static int hfi_send_cmd(struct gmu_device *gmu, uint32_t queue_idx, uint32_t *cmd = data; struct kgsl_hfi *hfi = &gmu->hfi; unsigned int seqnum = atomic_inc_return(&hfi->seqnum); + struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev); *cmd = MSG_HDR_SET_SEQNUM(*cmd, seqnum); if (ret_cmd == NULL) return hfi_queue_write(gmu, queue_idx, cmd); - init_completion(&ret_cmd->msg_complete); ret_cmd->sent_hdr = cmd[0]; - spin_lock_bh(&hfi->msglock); - list_add_tail(&ret_cmd->node, &hfi->msglist); - spin_unlock_bh(&hfi->msglock); - rc = hfi_queue_write(gmu, queue_idx, cmd); if (rc) - goto done; + return rc; + + rc = poll_adreno_gmu_reg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_INFO, + HFI_IRQ_MSGQ_MASK, HFI_IRQ_MSGQ_MASK, HFI_RSP_TIMEOUT); + + if (rc) { + dev_err(&gmu->pdev->dev, + "Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n", + cmd[0], MSG_HDR_GET_ID(*cmd), MSG_HDR_GET_SEQNUM(*cmd)); + return rc; + } + + /* Clear the interrupt */ + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR, + HFI_IRQ_MSGQ_MASK); + + hfi_process_queue(gmu, HFI_MSG_ID, ret_cmd); - rc = wait_for_completion_timeout( - &ret_cmd->msg_complete, - 
msecs_to_jiffies(HFI_RSP_TIMEOUT)); - if (!rc) { - /* Check one more time to make sure there is no response */ - hfi_process_queue(gmu, HFI_MSG_IDX); - if (!completion_done(&ret_cmd->msg_complete)) { - dev_err(&gmu->pdev->dev, - "Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n", - cmd[0], - MSG_HDR_GET_ID(*cmd), - MSG_HDR_GET_SEQNUM(*cmd)); - rc = -ETIMEDOUT; - } - } else - rc = 0; -done: - spin_lock_bh(&hfi->msglock); - list_del(&ret_cmd->node); - spin_unlock_bh(&hfi->msglock); return rc; } @@ -358,7 +371,7 @@ static int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state) .boot_state = boot_state, }; - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } static int hfi_get_fw_version(struct gmu_device *gmu, @@ -373,7 +386,7 @@ static int hfi_get_fw_version(struct gmu_device *gmu, memset(&ret_cmd, 0, sizeof(ret_cmd)); - rc = hfi_send_cmd(gmu, HFI_CMD_IDX, &cmd, &ret_cmd); + rc = hfi_send_cmd(gmu, HFI_CMD_ID, &cmd, &ret_cmd); if (rc) return rc; @@ -394,7 +407,7 @@ static int hfi_send_core_fw_start(struct gmu_device *gmu) .handle = 0x0, }; - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } static const char * const hfi_features[] = { @@ -422,7 +435,7 @@ static int hfi_send_feature_ctrl(struct gmu_device *gmu, }; int ret; - ret = hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + ret = hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); if (ret) dev_err(&gmu->pdev->dev, "Unable to %s feature %s (%d)\n", @@ -452,7 +465,7 @@ static int hfi_send_dcvstbl_v1(struct gmu_device *gmu) cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; } - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } static int hfi_send_get_value(struct gmu_device *gmu, @@ -466,7 +479,7 @@ static int hfi_send_get_value(struct gmu_device *gmu, cmd->hdr = CMD_MSG_HDR(H2F_MSG_GET_VALUE, sizeof(*cmd)); - rc = hfi_send_cmd(gmu, 
HFI_CMD_IDX, cmd, &ret_cmd); + rc = hfi_send_cmd(gmu, HFI_CMD_ID, cmd, &ret_cmd); if (rc) return rc; @@ -497,7 +510,7 @@ static int hfi_send_dcvstbl(struct gmu_device *gmu) cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; } - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } static int hfi_send_bwtbl(struct gmu_device *gmu) @@ -506,7 +519,7 @@ static int hfi_send_bwtbl(struct gmu_device *gmu) cmd->hdr = CMD_MSG_HDR(H2F_MSG_BW_VOTE_TBL, sizeof(*cmd)); - return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd); } static int hfi_send_test(struct gmu_device *gmu) @@ -515,7 +528,7 @@ static int hfi_send_test(struct gmu_device *gmu) .hdr = CMD_MSG_HDR(H2F_MSG_TEST, sizeof(cmd)), }; - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } static void receive_err_req(struct gmu_device *gmu, void *rcvd) @@ -536,11 +549,12 @@ static void receive_debug_req(struct gmu_device *gmu, void *rcvd) cmd->type, cmd->timestamp, cmd->data); } -static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd) +static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd, + struct pending_cmd *ret_cmd) { /* V1 ACK Handler */ if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_V1_MSG_ACK) { - receive_ack_cmd(gmu, rcvd); + receive_ack_cmd(gmu, rcvd, ret_cmd); return; } @@ -560,20 +574,21 @@ static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd) } } -static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx) +static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx, + struct pending_cmd *ret_cmd) { uint32_t rcvd[MAX_RCVD_SIZE]; while (hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) { /* Special case if we're v1 */ if (HFI_VER_MAJOR(&gmu->hfi) < 2) { - hfi_v1_receiver(gmu, rcvd); + hfi_v1_receiver(gmu, rcvd, ret_cmd); continue; } /* V2 ACK Handler */ if (MSG_HDR_GET_TYPE(rcvd[0]) == 
HFI_MSG_ACK) { - receive_ack_cmd(gmu, rcvd); + receive_ack_cmd(gmu, rcvd, ret_cmd); continue; } @@ -596,9 +611,8 @@ static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx) void hfi_receiver(unsigned long data) { - /* Process all read (firmware to host) queues */ - hfi_process_queue((struct gmu_device *) data, HFI_MSG_IDX); - hfi_process_queue((struct gmu_device *) data, HFI_DBG_IDX); + /* Process all asynchronous read (firmware to host) queues */ + hfi_process_queue((struct gmu_device *) data, HFI_DBG_ID, NULL); } #define GMU_VER_MAJOR(ver) (((ver) >> 28) & 0xF) @@ -648,9 +662,6 @@ static int hfi_verify_fw_version(struct kgsl_device *device, return 0; } -/* Levels greater than or equal to LM_DCVS_LEVEL are subject to throttling */ -#define LM_DCVS_LEVEL 4 - int hfi_start(struct kgsl_device *device, struct gmu_device *gmu, uint32_t boot_state) { @@ -713,11 +724,8 @@ int hfi_start(struct kgsl_device *device, return result; if (test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) { - /* We want all bits starting at LM_DCVS_LEVEL to be 1 */ - int lm_data = -1 << (LM_DCVS_LEVEL - 1); - - result = hfi_send_feature_ctrl(gmu, - HFI_FEATURE_LM, 1, lm_data); + result = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1, + device->pwrctrl.throttle_mask); if (result) return result; } @@ -773,14 +781,14 @@ int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data) cmd->hdr = CMD_MSG_HDR(H2F_MSG_LM_CFG, sizeof(*cmd)); - return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd); } case H2F_MSG_GX_BW_PERF_VOTE: { struct hfi_gx_bw_perf_vote_cmd *cmd = data; cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd)); - return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd); } case H2F_MSG_PREPARE_SLUMBER: { struct hfi_prep_slumber_cmd *cmd = data; @@ -790,14 +798,14 @@ int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data) cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd)); - 
return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd); } case H2F_MSG_START: { struct hfi_start_cmd *cmd = data; cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd)); - return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd); } case H2F_MSG_GET_VALUE: { return hfi_send_get_value(gmu, data); @@ -807,7 +815,7 @@ int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data) cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd)); - return hfi_send_generic_req(gmu, HFI_CMD_IDX, cmd); + return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd); } default: break; @@ -830,7 +838,7 @@ irqreturn_t hfi_irq_handler(int irq, void *data) adreno_write_gmureg(ADRENO_DEVICE(device), ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status); - if (status & HFI_IRQ_MSGQ_MASK) + if (status & HFI_IRQ_DBGQ_MASK) tasklet_hi_schedule(&hfi->tasklet); if (status & HFI_IRQ_CM3_FAULT_MASK) { dev_err_ratelimited(&gmu->pdev->dev, diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index 0d6a3ef9641f76a2c55c69c5b3d57ee37e0fe8cb..6573d8d631f6a592e24fb9d7f3be85f5094584bc 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -15,7 +15,7 @@ #include -#define HFI_QUEUE_SIZE SZ_4K /* bytes */ +#define HFI_QUEUE_SIZE SZ_4K /* bytes, must be base 4dw */ #define MAX_RCVD_PAYLOAD_SIZE 16 /* dwords */ #define MAX_RCVD_SIZE (MAX_RCVD_PAYLOAD_SIZE + 3) /* dwords */ #define HFI_MAX_MSG_SIZE (SZ_1K>>2) /* dwords */ @@ -25,7 +25,11 @@ #define HFI_QUEUE_DISPATCH_CNT 1 #define HFI_QUEUE_MAX (HFI_QUEUE_DEFAULT_CNT + HFI_QUEUE_DISPATCH_CNT) -#define HFIMEM_SIZE (HFI_QUEUE_SIZE * (HFI_QUEUE_MAX + 1)) +struct hfi_queue_table; + +/* Total header sizes + queue sizes + 16 for alignment */ +#define HFIMEM_SIZE (sizeof(struct hfi_queue_table) + 16 + \ + (HFI_QUEUE_SIZE * HFI_QUEUE_MAX)) #define HFI_CMD_ID 0 #define HFI_MSG_ID 1 @@ -38,6 +42,11 @@ #define HFI_DSP_IDX_BASE 3 #define HFI_DSP_IDX_0 3 +#define HFI_CMD_IDX_LEGACY 0 
+#define HFI_DSP_IDX_0_LEGACY 1 +#define HFI_MSG_IDX_LEGACY 4 +#define HFI_DBG_IDX_LEGACY 5 + #define HFI_QUEUE_STATUS_DISABLED 0 #define HFI_QUEUE_STATUS_ENABLED 1 @@ -47,26 +56,15 @@ #define HFI_DBG_PRI 40 #define HFI_DSP_PRI_0 20 -#define HFI_RSP_TIMEOUT 5000 /* msec */ +#define HFI_RSP_TIMEOUT 100 /* msec */ #define HFI_H2F_CMD_IRQ_MASK BIT(0) -#define HFI_QUEUE_OFFSET(i) \ - ((sizeof(struct hfi_queue_table)) + \ - ((i) * HFI_QUEUE_SIZE)) - -#define HOST_QUEUE_START_ADDR(hfi_mem, i) \ - ((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i)) - -#define GMU_QUEUE_START_ADDR(hfi_mem, i) \ - ((hfi_mem)->gmuaddr + HFI_QUEUE_OFFSET(i)) - #define HFI_IRQ_MSGQ_MASK BIT(0) #define HFI_IRQ_SIDEMSGQ_MASK BIT(1) #define HFI_IRQ_DBGQ_MASK BIT(2) #define HFI_IRQ_CM3_FAULT_MASK BIT(15) #define HFI_IRQ_OOB_MASK GENMASK(31, 16) -#define HFI_IRQ_MASK (HFI_IRQ_MSGQ_MASK |\ - HFI_IRQ_SIDEMSGQ_MASK |\ +#define HFI_IRQ_MASK (HFI_IRQ_SIDEMSGQ_MASK |\ HFI_IRQ_DBGQ_MASK |\ HFI_IRQ_CM3_FAULT_MASK) @@ -588,14 +586,10 @@ struct hfi_context_bad_reply_cmd { /** * struct pending_cmd - data structure to track outstanding HFI * command messages - * @msg_complete: a blocking mechanism for sender to wait for ACK - * @node: a node in pending message queue * @sent_hdr: copy of outgoing header for response comparison * @results: the payload of received return message (ACK) */ struct pending_cmd { - struct completion msg_complete; - struct list_head node; uint32_t sent_hdr; uint32_t results[MAX_RCVD_SIZE]; }; @@ -604,11 +598,7 @@ struct pending_cmd { * struct kgsl_hfi - HFI control structure * @kgsldev: Point to the kgsl device * @hfi_interrupt_num: number of GMU asserted HFI interrupt - * @msglock: spinlock to protect access to outstanding command message list - * @read_queue_lock: spinlock to protect against concurrent reading of queues * @cmdq_mutex: mutex to protect command queue access from multiple senders - * @msglist: outstanding command message list. 
Each message in the list - * is waiting for ACK from GMU * @tasklet: the thread handling received messages from GMU * @version: HFI version number provided * @seqnum: atomic counter that is incremented for each message sent. The @@ -618,10 +608,7 @@ struct pending_cmd { struct kgsl_hfi { struct kgsl_device *kgsldev; int hfi_interrupt_num; - spinlock_t msglock; - spinlock_t read_queue_lock; struct mutex cmdq_mutex; - struct list_head msglist; struct tasklet_struct tasklet; uint32_t version; atomic_t seqnum; diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index c7126fb810113d875218ee2122e7114ab38896a1..d0b0c8d59064c71c2989a22ab22f7c60b05c0e3b 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -847,13 +847,23 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr); if (!no_page_fault_log && __ratelimit(&_rs)) { + const char *api_str; + + if (context != NULL) { + struct adreno_context *drawctxt = + ADRENO_CONTEXT(context); + + api_str = get_api_type_str(drawctxt->type); + } else + api_str = "UNKNOWN"; + KGSL_MEM_CRIT(ctx->kgsldev, "GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr, ptname, context != NULL ? context->proc_priv->comm : "unknown"); KGSL_MEM_CRIT(ctx->kgsldev, - "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n", - ctx->name, ptbase, contextidr, + "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n", + ctx->name, api_str, ptbase, contextidr, write ? 
"write" : "read", fault_type); if (gpudev->iommu_fault_block) { @@ -1168,7 +1178,7 @@ void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt) int ret; /* GPU pagetable walk LLC slice not enabled */ - if (!adreno_dev->gpuhtw_llc_slice) + if (IS_ERR(adreno_dev->gpuhtw_llc_slice)) return; /* Domain attribute to enable system cache for GPU pagetable walks */ diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 18153e3ff558c41f40636f494c247105119d6f4b..95190163a4ca3246ac952d9a8909db6d546cffaa 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -226,7 +226,7 @@ static int kgsl_bus_scale_request(struct kgsl_device *device, int ret = 0; /* GMU scales BW */ - if (gmu_core_gpmu_isenabled(device)) + if (gmu_core_scales_bandwidth(device)) ret = gmu_core_dcvs_set(device, INVALID_DCVS_IDX, buslevel); else if (pwr->pcl) /* Linux bus driver scales BW */ @@ -2839,6 +2839,12 @@ _aware(struct kgsl_device *device) status = gmu_core_start(device); break; case KGSL_STATE_INIT: + /* if GMU already in FAULT */ + if (gmu_core_isenabled(device) && + test_bit(GMU_FAULT, &device->gmu_core.flags)) { + status = -EINVAL; + break; + } status = kgsl_pwrctrl_enable(device); break; /* The following 3 cases shouldn't occur, but don't panic. 
*/ diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index 0a78aba0657eab083b57848552ed28666589262c..61fc16fa16b29ce93a827f92507fe6c3e9eb9972 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -137,6 +137,7 @@ struct kgsl_regulator { * @max_pwrlevel - maximum allowable powerlevel per the user * @min_pwrlevel - minimum allowable powerlevel per the user * @num_pwrlevels - number of available power levels + * @throttle_mask - LM throttle mask * @interval_timeout - timeout in jiffies to be idle before a power event * @clock_times - Each GPU frequency's accumulated active time in us * @regulators - array of pointers to kgsl_regulator structs @@ -196,6 +197,7 @@ struct kgsl_pwrctrl { unsigned int max_pwrlevel; unsigned int min_pwrlevel; unsigned int num_pwrlevels; + unsigned int throttle_mask; unsigned long interval_timeout; u64 clock_times[KGSL_MAX_PWRLEVELS]; struct kgsl_regulator regulators[KGSL_MAX_REGULATORS]; diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index e207a073c78a5c5e6e9fad9d22e9a3274abfe4e6..244118f31d19765127fc507dd6ad1cfea17b4e6e 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -875,7 +875,7 @@ int kgsl_busmon_get_cur_freq(struct device *dev, unsigned long *freq) static int opp_notify(struct notifier_block *nb, unsigned long type, void *in_opp) { - int result = -EINVAL, level, min_level, max_level; + int level, min_level, max_level; struct kgsl_pwrctrl *pwr = container_of(nb, struct kgsl_pwrctrl, nb); struct kgsl_device *device = container_of(pwr, struct kgsl_device, pwrctrl); @@ -884,20 +884,19 @@ static int opp_notify(struct notifier_block *nb, unsigned long min_freq = 0, max_freq = pwr->pwrlevels[0].gpu_freq; if (type != OPP_EVENT_ENABLE && type != OPP_EVENT_DISABLE) - return result; + return -EINVAL; opp = dev_pm_opp_find_freq_floor(dev, &max_freq); - dev_pm_opp_put(opp); - - if (IS_ERR(opp)) { + if (IS_ERR(opp)) return 
PTR_ERR(opp); - } - opp = dev_pm_opp_find_freq_ceil(dev, &min_freq); dev_pm_opp_put(opp); + opp = dev_pm_opp_find_freq_ceil(dev, &min_freq); if (IS_ERR(opp)) min_freq = pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq; + else + dev_pm_opp_put(opp); mutex_lock(&device->mutex); diff --git a/drivers/gpu/msm/kgsl_rgmu.c b/drivers/gpu/msm/kgsl_rgmu.c new file mode 100644 index 0000000000000000000000000000000000000000..76924b744733636fda88c34377c3e2ef99125082 --- /dev/null +++ b/drivers/gpu/msm/kgsl_rgmu.c @@ -0,0 +1,463 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include + +#include "kgsl_device.h" +#include "kgsl_rgmu.h" +#include "kgsl_gmu_core.h" +#include "kgsl_trace.h" +#include "adreno.h" + +#define RGMU_CLK_FREQ 200000000 + +static int rgmu_irq_probe(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + int ret; + + rgmu->oob_interrupt_num = platform_get_irq_byname(rgmu->pdev, + "kgsl_oob"); + + ret = devm_request_irq(&rgmu->pdev->dev, + rgmu->oob_interrupt_num, + oob_irq_handler, IRQF_TRIGGER_HIGH, + "kgsl-oob", device); + if (ret) { + dev_err(&rgmu->pdev->dev, + "Request kgsl-oob interrupt failed:%d\n", ret); + return ret; + } + + rgmu->rgmu_interrupt_num = platform_get_irq_byname(rgmu->pdev, + "kgsl_rgmu"); + + ret = devm_request_irq(&rgmu->pdev->dev, + rgmu->rgmu_interrupt_num, + rgmu_irq_handler, IRQF_TRIGGER_HIGH, + "kgsl-rgmu", device); + if (ret) + dev_err(&rgmu->pdev->dev, + "Request kgsl-rgmu interrupt failed:%d\n", ret); + + return ret; +} + +static int rgmu_regulators_probe(struct rgmu_device *rgmu, + struct device_node *node) +{ + int ret; + + rgmu->cx_gdsc = devm_regulator_get(&rgmu->pdev->dev, "vddcx"); + if (IS_ERR_OR_NULL(rgmu->cx_gdsc)) { + ret = PTR_ERR(rgmu->cx_gdsc); + dev_err(&rgmu->pdev->dev, + "Couldn't get CX gdsc error:%d\n", ret); + rgmu->cx_gdsc = NULL; + return ret; + } + + rgmu->gx_gdsc = devm_regulator_get(&rgmu->pdev->dev, "vdd"); + if (IS_ERR_OR_NULL(rgmu->gx_gdsc)) { + ret = PTR_ERR(rgmu->gx_gdsc); + dev_err(&rgmu->pdev->dev, + "Couldn't get GX gdsc error:%d\n", ret); + rgmu->gx_gdsc = NULL; + return ret; + } + + return 0; +} + +static int rgmu_clocks_probe(struct rgmu_device *rgmu, struct device_node *node) +{ + const char *cname; + struct property *prop; + struct clk *c; + int i = 0; + + of_property_for_each_string(node, "clock-names", prop, cname) { + + if (i >= ARRAY_SIZE(rgmu->clks)) { + dev_err(&rgmu->pdev->dev, + "dt: too many RGMU clocks defined\n"); + return -EINVAL; + } + + c = 
devm_clk_get(&rgmu->pdev->dev, cname); + if (IS_ERR_OR_NULL(c)) { + dev_err(&rgmu->pdev->dev, + "dt: Couldn't get clock: %s\n", cname); + return PTR_ERR(c); + } + + /* Remember the key clocks that we need to control later */ + if (!strcmp(cname, "core")) + rgmu->gpu_clk = c; + else if (!strcmp(cname, "gmu")) + rgmu->rgmu_clk = c; + + rgmu->clks[i++] = c; + } + + return 0; +} + +static inline int rgmu_clk_set_rate(struct clk *grp_clk, unsigned int freq) +{ + int ret = clk_set_rate(grp_clk, freq); + + if (ret) + pr_err("%s set freq %d failed:%d\n", + __clk_get_name(grp_clk), freq, ret); + + return ret; +} + + +static void rgmu_disable_clks(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + int j = 0, ret; + + /* Check GX GDSC is status */ + if (gmu_dev_ops->gx_is_on(ADRENO_DEVICE(device))) { + + if (IS_ERR_OR_NULL(rgmu->gx_gdsc)) + return; + + /* + * Switch gx gdsc control from RGMU to CPU. Force non-zero + * reference count in clk driver so next disable call will + * turn off the GDSC. 
+ */ + ret = regulator_enable(rgmu->gx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Fail to enable gx gdsc:%d\n", ret); + + ret = regulator_disable(rgmu->gx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Fail to disable gx gdsc:%d\n", ret); + + if (gmu_dev_ops->gx_is_on(ADRENO_DEVICE(device))) + dev_err(&rgmu->pdev->dev, "gx is stuck on\n"); + } + + for (j = 0; j < ARRAY_SIZE(rgmu->clks); j++) + clk_disable_unprepare(rgmu->clks[j]); + + clear_bit(GMU_CLK_ON, &device->gmu_core.flags); +} + +static int rgmu_enable_clks(struct kgsl_device *device) +{ + int ret, j = 0; + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + if (IS_ERR_OR_NULL(rgmu->rgmu_clk) || + IS_ERR_OR_NULL(rgmu->gpu_clk)) + return -EINVAL; + + /* Let us set rgmu clk */ + ret = rgmu_clk_set_rate(rgmu->rgmu_clk, RGMU_CLK_FREQ); + if (ret) + return ret; + + /* Let us set gpu clk to default power level */ + ret = rgmu_clk_set_rate(rgmu->gpu_clk, + rgmu->gpu_freqs[pwr->default_pwrlevel]); + if (ret) + return ret; + + for (j = 0; j < ARRAY_SIZE(rgmu->clks); j++) { + ret = clk_prepare_enable(rgmu->clks[j]); + if (ret) { + dev_err(&rgmu->pdev->dev, + "Fail(%d) to enable gpucc clk idx %d\n", + ret, j); + return ret; + } + } + + set_bit(GMU_CLK_ON, &device->gmu_core.flags); + return 0; +} + +static int rgmu_disable_gdsc(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + int ret = 0; + + if (IS_ERR_OR_NULL(rgmu->cx_gdsc)) + return 0; + + ret = regulator_disable(rgmu->cx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Failed to disable CX gdsc:%d\n", ret); + + return ret; +} + +static int rgmu_enable_gdsc(struct rgmu_device *rgmu) +{ + int ret; + + if (IS_ERR_OR_NULL(rgmu->cx_gdsc)) + return 0; + + ret = regulator_enable(rgmu->cx_gdsc); + if (ret) + dev_err(&rgmu->pdev->dev, + "Fail to enable CX gdsc:%d\n", ret); + + return ret; +} + +static void rgmu_snapshot(struct kgsl_device *device) +{ + struct adreno_device 
*adreno_dev = ADRENO_DEVICE(device); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + /* Mask so there's no interrupt caused by NMI */ + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF); + + /* Make sure the interrupt is masked */ + wmb(); + + kgsl_device_snapshot(device, NULL, true); + + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF); + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_MASK, + ~(gmu_dev_ops->gmu2host_intr_mask)); + + rgmu->fault_count++; +} + +/* Caller shall ensure GPU is ready for SLUMBER */ +static void rgmu_stop(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + + if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags)) + return; + + /* Wait for the lowest idle level we requested */ + if (gmu_dev_ops->wait_for_lowest_idle(adreno_dev)) + goto error; + + gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, + GMU_NOTIFY_SLUMBER, 0, 0); + + gmu_dev_ops->irq_disable(device); + rgmu_disable_clks(device); + rgmu_disable_gdsc(device); + return; + +error: + + /* + * The power controller will change state to SLUMBER anyway + * Set GMU_FAULT flag to indicate to power contrller + * that hang recovery is needed to power on GPU + */ + set_bit(GMU_FAULT, &device->gmu_core.flags); + gmu_dev_ops->irq_disable(device); + rgmu_snapshot(device); +} + +/* Do not access any RGMU registers in RGMU probe function */ +static int rgmu_probe(struct kgsl_device *device, struct device_node *node) +{ + struct rgmu_device *rgmu; + struct platform_device *pdev = of_find_device_by_node(node); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct resource *res; + int i, ret = -ENXIO; + + rgmu = devm_kzalloc(&pdev->dev, sizeof(*rgmu), GFP_KERNEL); + + if (rgmu == NULL) + return -ENOMEM; + + rgmu->pdev = pdev; + + /* Set up RGMU regulators */ + ret 
= rgmu_regulators_probe(rgmu, node); + if (ret) + return ret; + + /* Set up RGMU clocks */ + ret = rgmu_clocks_probe(rgmu, node); + if (ret) + return ret; + + /* Map and reserve RGMU CSRs registers */ + res = platform_get_resource_byname(rgmu->pdev, + IORESOURCE_MEM, "kgsl_rgmu"); + if (res == NULL) { + dev_err(&rgmu->pdev->dev, + "platform_get_resource failed\n"); + return -EINVAL; + } + + if (res->start == 0 || resource_size(res) == 0) { + dev_err(&rgmu->pdev->dev, + "Register region is invalid\n"); + return -EINVAL; + } + + rgmu->reg_phys = res->start; + rgmu->reg_len = resource_size(res); + device->gmu_core.reg_virt = devm_ioremap(&rgmu->pdev->dev, res->start, + resource_size(res)); + + if (device->gmu_core.reg_virt == NULL) { + dev_err(&rgmu->pdev->dev, "Unable to remap rgmu registers\n"); + return -ENODEV; + } + + device->gmu_core.gmu2gpu_offset = + (rgmu->reg_phys - device->reg_phys) >> 2; + device->gmu_core.reg_len = rgmu->reg_len; + device->gmu_core.ptr = (void *)rgmu; + + /* Initialize OOB and RGMU interrupts */ + ret = rgmu_irq_probe(device); + if (ret) + return ret; + + /* Don't enable RGMU interrupts until RGMU started */ + /* We cannot use rgmu_irq_disable because it writes registers */ + disable_irq(rgmu->rgmu_interrupt_num); + disable_irq(rgmu->oob_interrupt_num); + + /* Retrieves GPU power level configurations */ + for (i = 0; i < pwr->num_pwrlevels; i++) + rgmu->gpu_freqs[i] = pwr->pwrlevels[i].gpu_freq; + + rgmu->num_gpupwrlevels = pwr->num_pwrlevels; + + /* Set up RGMU idle states */ + if (ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_IFPC)) + rgmu->idle_level = GPU_HW_IFPC; + else + rgmu->idle_level = GPU_HW_ACTIVE; + + set_bit(GMU_ENABLED, &device->gmu_core.flags); + device->gmu_core.dev_ops = &adreno_a6xx_rgmudev; + + return 0; +} + +static int rgmu_suspend(struct kgsl_device *device) +{ + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + + if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags)) + return 0; + + 
gmu_dev_ops->irq_disable(device); + + if (gmu_dev_ops->rpmh_gpu_pwrctrl(ADRENO_DEVICE(device), + GMU_SUSPEND, 0, 0)) + return -EINVAL; + + rgmu_disable_clks(device); + return rgmu_disable_gdsc(device); +} + +/* To be called to power on both GPU and RGMU */ +static int rgmu_start(struct kgsl_device *device) +{ + int ret = 0; + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + switch (device->state) { + case KGSL_STATE_RESET: + ret = rgmu_suspend(device); + if (ret) + goto error_rgmu; + case KGSL_STATE_INIT: + case KGSL_STATE_SUSPEND: + case KGSL_STATE_SLUMBER: + rgmu_enable_gdsc(rgmu); + rgmu_enable_clks(device); + gmu_dev_ops->irq_enable(device); + ret = gmu_dev_ops->rpmh_gpu_pwrctrl(ADRENO_DEVICE(device), + GMU_FW_START, GMU_COLD_BOOT, 0); + if (ret) + goto error_rgmu; + break; + } + /* Request default DCVS level */ + kgsl_pwrctrl_set_default_gpu_pwrlevel(device); + return 0; + +error_rgmu: + set_bit(GMU_FAULT, &device->gmu_core.flags); + gmu_dev_ops->irq_disable(device); + rgmu_snapshot(device); + return ret; +} + +/* + * rgmu_dcvs_set() - Change GPU frequency and/or bandwidth. + * @rgmu: Pointer to RGMU device + * @pwrlevel: index to GPU DCVS table used by KGSL + * @bus_level: index to GPU bus table used by KGSL + * + * The function converts GPU power level and bus level index used by KGSL + * to index being used by GMU/RPMh. 
+ */ +static int rgmu_dcvs_set(struct kgsl_device *device, + unsigned int pwrlevel, unsigned int bus_level) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + if (pwrlevel == INVALID_DCVS_IDX) + return -EINVAL; + + return rgmu_clk_set_rate(rgmu->gpu_clk, + rgmu->gpu_freqs[pwrlevel]); + +} + +static bool rgmu_regulator_isenabled(struct kgsl_device *device) +{ + struct rgmu_device *rgmu = KGSL_RGMU_DEVICE(device); + + return (rgmu->gx_gdsc && regulator_is_enabled(rgmu->gx_gdsc)); +} + +struct gmu_core_ops rgmu_ops = { + .probe = rgmu_probe, + .remove = rgmu_stop, + .start = rgmu_start, + .stop = rgmu_stop, + .dcvs_set = rgmu_dcvs_set, + .snapshot = rgmu_snapshot, + .regulator_isenabled = rgmu_regulator_isenabled, +}; diff --git a/drivers/gpu/msm/kgsl_rgmu.h b/drivers/gpu/msm/kgsl_rgmu.h new file mode 100644 index 0000000000000000000000000000000000000000..23dac449a299f2a18a928df4d8e0aa1271e71e7a --- /dev/null +++ b/drivers/gpu/msm/kgsl_rgmu.h @@ -0,0 +1,70 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __KGSL_RGMU_H +#define __KGSL_RGMU_H + +#define RGMU_AO_IRQ_FENCE_ERR BIT(3) +#define RGMU_AO_IRQ_MASK RGMU_AO_IRQ_FENCE_ERR + +#define RGMU_OOB_IRQ_ERR_MSG BIT(24) +#define RGMU_OOB_IRQ_ACK_MASK GENMASK(23, 16) +#define RGMU_OOB_IRQ_ERR_MSG_MASK GENMASK(31, 24) +#define RGMU_OOB_IRQ_MASK RGMU_OOB_IRQ_ERR_MSG_MASK + +#define MAX_RGMU_CLKS 8 + +/** + * struct rgmu_device - rGMU device structure + * @reg_phys: RGMU CSR physical address + * @reg_virt: RGMU CSR virtual address + * @reg_len: RGMU CSR range + * @rgmu_interrupt_num: RGMU interrupt number + * @oob_interrupt_num: number of RGMU asserted OOB interrupt + * @fw_hostptr: Buffer which holds the RGMU firmware + * @fw_size: Size of RGMU firmware buffer + * @cx_gdsc: CX headswitch that controls power of RGMU and + subsystem peripherals + * @clks: RGMU clocks including the GPU + * @gpu_clk: Pointer to GPU core clock + * @rgmu_clk: Pointer to rgmu clock + * @gpu_freqs: GPU frequency table with lowest freq at index 0 + * @num_gpupwrlevels: number GPU frequencies in GPU freq table + * @flags: RGMU flags + * @idle_level: Minimal GPU idle power level + * @fault_count: RGMU fault count + */ +struct rgmu_device { + struct platform_device *pdev; + unsigned long reg_phys; + unsigned int reg_len; + unsigned int rgmu_interrupt_num; + unsigned int oob_interrupt_num; + unsigned int *fw_hostptr; + uint32_t fw_size; + struct regulator *cx_gdsc; + struct regulator *gx_gdsc; + struct clk *clks[MAX_RGMU_CLKS]; + struct clk *gpu_clk; + struct clk *rgmu_clk; + unsigned int gpu_freqs[MAX_GX_LEVELS]; + unsigned int num_gpupwrlevels; + unsigned int idle_level; + unsigned int fault_count; +}; + +extern struct gmu_dev_ops adreno_a6xx_rgmudev; +#define KGSL_RGMU_DEVICE(_a) ((struct rgmu_device *)((_a)->gmu_core.ptr)) + +irqreturn_t rgmu_irq_handler(int irq, void *data); +irqreturn_t oob_irq_handler(int irq, void *data); +#endif /* __KGSL_RGMU_H */ diff --git a/drivers/gpu/msm/kgsl_snapshot.c 
b/drivers/gpu/msm/kgsl_snapshot.c index 13769f8ec2d281ea7a7073a21e76d9d0aab098c5..bc8357f2f0e8d15eb19a65345181c3ddb025e77f 100644 --- a/drivers/gpu/msm/kgsl_snapshot.c +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -734,8 +734,8 @@ void kgsl_device_snapshot(struct kgsl_device *device, /* log buffer info to aid in ramdump fault tolerance */ pa = __pa(device->snapshot_memory.ptr); - KGSL_DRV_ERR(device, "snapshot created at pa %pa size %zd\n", - &pa, snapshot->size); + KGSL_DRV_ERR(device, "%s snapshot created at pa %pa++0x%zx\n", + gmu_fault ? "GMU" : "GPU", &pa, snapshot->size); sysfs_notify(&device->snapshot_kobj, NULL, "timestamp"); diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index 418f67f81c797aa417ff6947567aebb8f4ca6588..5e8e7b1ee890e3566c578c2a36e64172a3d9a92e 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -625,18 +625,11 @@ void kgsl_syncsource_put(struct kgsl_syncsource *syncsource) kref_put(&syncsource->refcount, kgsl_syncsource_destroy); } -void kgsl_syncsource_cleanup(struct kgsl_process_private *private, +static void kgsl_syncsource_cleanup(struct kgsl_process_private *private, struct kgsl_syncsource *syncsource) { struct kgsl_syncsource_fence *sfence, *next; - spin_lock(&private->syncsource_lock); - if (syncsource->id != 0) { - idr_remove(&private->syncsource_idr, syncsource->id); - syncsource->id = 0; - } - spin_unlock(&private->syncsource_lock); - /* Signal all fences to release any callbacks */ spin_lock(&syncsource->lock); @@ -661,10 +654,17 @@ long kgsl_ioctl_syncsource_destroy(struct kgsl_device_private *dev_priv, spin_lock(&private->syncsource_lock); syncsource = idr_find(&private->syncsource_idr, param->id); - spin_unlock(&private->syncsource_lock); - if (syncsource == NULL) + if (syncsource == NULL) { + spin_unlock(&private->syncsource_lock); return -EINVAL; + } + + if (syncsource->id != 0) { + idr_remove(&private->syncsource_idr, syncsource->id); + syncsource->id = 0; + } + 
spin_unlock(&private->syncsource_lock); kgsl_syncsource_cleanup(private, syncsource); return 0; @@ -809,6 +809,32 @@ static void kgsl_syncsource_fence_release(struct dma_fence *fence) kfree(sfence); } +void kgsl_syncsource_process_release_syncsources( + struct kgsl_process_private *private) +{ + struct kgsl_syncsource *syncsource; + int next = 0; + + while (1) { + spin_lock(&private->syncsource_lock); + syncsource = idr_get_next(&private->syncsource_idr, &next); + + if (syncsource == NULL) { + spin_unlock(&private->syncsource_lock); + break; + } + + if (syncsource->id != 0) { + idr_remove(&private->syncsource_idr, syncsource->id); + syncsource->id = 0; + } + spin_unlock(&private->syncsource_lock); + + kgsl_syncsource_cleanup(private, syncsource); + next = next + 1; + } +} + static const char *kgsl_syncsource_get_timeline_name(struct dma_fence *fence) { struct kgsl_syncsource_fence *sfence = diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h index 03f501f764c802c6ce36e64e1a06714d9d72c868..87cc9c1c126570a1f80a42c118e7f5e70d96ad7a 100644 --- a/drivers/gpu/msm/kgsl_sync.h +++ b/drivers/gpu/msm/kgsl_sync.h @@ -108,8 +108,8 @@ long kgsl_ioctl_syncsource_signal_fence(struct kgsl_device_private *dev_priv, void kgsl_syncsource_put(struct kgsl_syncsource *syncsource); -void kgsl_syncsource_cleanup(struct kgsl_process_private *private, - struct kgsl_syncsource *syncsource); +void kgsl_syncsource_process_release_syncsources( + struct kgsl_process_private *private); void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event, char *fence_str, int len); @@ -182,8 +182,8 @@ static inline void kgsl_syncsource_put(struct kgsl_syncsource *syncsource) } -static inline void kgsl_syncsource_cleanup(struct kgsl_process_private *private, - struct kgsl_syncsource *syncsource) +static inline void kgsl_syncsource_process_release_syncsources( + struct kgsl_process_private *private) { } diff --git a/drivers/hwtracing/coresight/coresight-ost.c 
b/drivers/hwtracing/coresight/coresight-ost.c index 943e5c4ea855dd09065a1e967d2d2c3a7c0e954f..80cf70e0d73a6b2dd65132164ac486fe2d097108 100644 --- a/drivers/hwtracing/coresight/coresight-ost.c +++ b/drivers/hwtracing/coresight/coresight-ost.c @@ -176,6 +176,9 @@ static inline int __stm_trace(uint32_t flags, uint8_t entity_id, uint32_t ch; void __iomem *ch_addr; + if (!(drvdata && drvdata->master_enable)) + return 0; + /* allocate channel and get the channel address */ ch = stm_channel_alloc(); if (unlikely(ch >= drvdata->numsp)) { @@ -226,9 +229,9 @@ int stm_trace(uint32_t flags, uint8_t entity_id, uint8_t proto_id, struct stm_drvdata *drvdata = stmdrvdata; /* we don't support sizes more than 24bits (0 to 23) */ - if (!(drvdata && drvdata->enable && drvdata->master_enable && - test_bit(entity_id, drvdata->entities) && size && - (size < 0x1000000))) + if (!(drvdata && drvdata->enable && + test_bit(entity_id, drvdata->entities) && + size && (size < 0x1000000))) return 0; return __stm_trace(flags, entity_id, proto_id, data, size); diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 9b0525c272380037338c7d4bb40597928e469170..2b537932f3857d1d88eb7f29db7d87a8f0fcdf92 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -367,6 +367,9 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data, if (!(drvdata && local_read(&drvdata->mode))) return -EACCES; + if (!drvdata->master_enable) + return -EPERM; + if (channel >= drvdata->numsp) return -EINVAL; diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index 3067e6daca9438717cd31de3d6e5f9e748531a05..c706ef9d0dc01f12c5c9778faf4b283b1b3cbe5b 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -219,6 +219,7 @@ struct dsb_dataset { uint32_t trig_patt_val[TPDM_DSB_MAX_PATT]; uint32_t 
trig_patt_mask[TPDM_DSB_MAX_PATT]; bool trig_ts; + bool trig_type; uint32_t select_val[TPDM_DSB_MAX_SELECT]; uint32_t msr[TPDM_DSB_MAX_MSR]; }; @@ -552,6 +553,13 @@ static void __tpdm_enable_dsb(struct tpdm_drvdata *drvdata) val = val | BIT(1); else val = val & ~BIT(1); + + /* Set trigger type */ + if (drvdata->dsb->trig_type) + val = val | BIT(12); + else + val = val & ~BIT(12); + tpdm_writel(drvdata, val, TPDM_DSB_CR); val = tpdm_readl(drvdata, TPDM_DSB_CR); @@ -3305,6 +3313,43 @@ static ssize_t tpdm_store_dsb_trig_patt_mask(struct device *dev, static DEVICE_ATTR(dsb_trig_patt_mask, 0644, tpdm_show_dsb_trig_patt_mask, tpdm_store_dsb_trig_patt_mask); +static ssize_t tpdm_show_dsb_trig_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + if (!test_bit(TPDM_DS_DSB, drvdata->datasets)) + return -EPERM; + + return scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)drvdata->dsb->trig_type); +} + +static ssize_t tpdm_store_dsb_trig_type(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!test_bit(TPDM_DS_DSB, drvdata->datasets)) + return -EPERM; + + mutex_lock(&drvdata->lock); + if (val) + drvdata->dsb->trig_type = true; + else + drvdata->dsb->trig_type = false; + mutex_unlock(&drvdata->lock); + return size; +} +static DEVICE_ATTR(dsb_trig_type, 0644, + tpdm_show_dsb_trig_type, tpdm_store_dsb_trig_type); + static ssize_t tpdm_show_dsb_trig_ts(struct device *dev, struct device_attribute *attr, char *buf) @@ -4118,6 +4163,7 @@ static struct attribute *tpdm_dsb_attrs[] = { &dev_attr_dsb_trig_patt_val.attr, &dev_attr_dsb_trig_patt_mask.attr, &dev_attr_dsb_trig_ts.attr, + &dev_attr_dsb_trig_type.attr, &dev_attr_dsb_select_val.attr, &dev_attr_dsb_msr.attr, NULL, @@ -4231,8 +4277,10 @@ static void 
tpdm_init_default_data(struct tpdm_drvdata *drvdata) if (test_bit(TPDM_DS_TC, drvdata->datasets)) drvdata->tc->retrieval_mode = TPDM_MODE_ATB; - if (test_bit(TPDM_DS_DSB, drvdata->datasets)) + if (test_bit(TPDM_DS_DSB, drvdata->datasets)) { drvdata->dsb->trig_ts = true; + drvdata->dsb->trig_type = false; + } if (test_bit(TPDM_DS_CMB, drvdata->datasets) || test_bit(TPDM_DS_MCMB, drvdata->datasets)) diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c index 7cc511dd0f99d7eb6becf17f2f20e2022edde4a0..2ce1031d6c8d22bc04351563a273fc7f3b51463e 100644 --- a/drivers/iio/adc/qcom-spmi-adc5.c +++ b/drivers/iio/adc/qcom-spmi-adc5.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -105,6 +106,11 @@ enum adc_cal_val { ADC_NEW_CAL }; +struct pmic_rev_data { + int subtype; + int rev4; +}; + /** * struct adc_channel_prop - ADC channel property. * @channel: channel number, refer to the channel list. @@ -156,6 +162,7 @@ struct adc_chip { bool poll_eoc; struct completion complete; struct mutex lock; + bool skip_usb_wa; const struct adc_data *data; }; @@ -332,7 +339,8 @@ static int adc_post_configure_usb_in_read(struct adc_chip *adc, { u8 data; - if ((prop->channel == ADC_USB_IN_V_16) && adc->cal_addr) { + if ((prop->channel == ADC_USB_IN_V_16) && adc->cal_addr && + !adc->skip_usb_wa) { data = ADC_CAL_DELAY_CTL_VAL_125MS; /* Set calibration measurement interval to 125ms */ return regmap_bulk_write(adc->regmap, @@ -448,7 +456,8 @@ static int adc_do_conversion(struct adc_chip *adc, mutex_lock(&adc->lock); - if ((prop->channel == ADC_USB_IN_V_16) && adc->cal_addr) { + if ((prop->channel == ADC_USB_IN_V_16) && adc->cal_addr && + !adc->skip_usb_wa) { ret = adc_pre_configure_usb_in_read(adc); if (ret) { pr_err("ADC configure failed with %d\n", ret); @@ -877,9 +886,30 @@ static int adc_get_dt_data(struct adc_chip *adc, struct device_node *node) return 0; } +static const struct pmic_rev_data pmic_data[] = { + {PM6150_SUBTYPE, 1}, +}; + +bool 
skip_usb_in_wa(struct pmic_revid_data *pmic_rev_id) +{ + int i = 0; + uint32_t tablesize = ARRAY_SIZE(pmic_data); + + while (i < tablesize) { + if (pmic_data[i].subtype == pmic_rev_id->pmic_subtype + && pmic_data[i].rev4 < pmic_rev_id->rev4) { + return true; + } + i++; + } + return false; +} + static int adc_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; + struct device_node *revid_dev_node; + struct pmic_revid_data *pmic_rev_id; struct device *dev = &pdev->dev; struct iio_dev *indio_dev; struct adc_chip *adc; @@ -887,6 +917,7 @@ static int adc_probe(struct platform_device *pdev) const __be32 *prop_addr; int ret, irq_eoc; u32 reg; + bool skip_usb_wa = false; regmap = dev_get_regmap(dev->parent, NULL); if (!regmap) @@ -896,6 +927,16 @@ static int adc_probe(struct platform_device *pdev) if (ret < 0) return ret; + revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0); + if (revid_dev_node) { + pmic_rev_id = get_revid_data(revid_dev_node); + if (!(IS_ERR(pmic_rev_id))) + skip_usb_wa = skip_usb_in_wa(pmic_rev_id); + else + pr_err("Unable to get revid\n"); + of_node_put(revid_dev_node); + } + indio_dev = devm_iio_device_alloc(dev, sizeof(*adc)); if (!indio_dev) return -ENOMEM; @@ -917,6 +958,8 @@ static int adc_probe(struct platform_device *pdev) else adc->cal_addr = be32_to_cpu(*prop_addr); + adc->skip_usb_wa = skip_usb_wa; + init_completion(&adc->complete); mutex_init(&adc->lock); diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c index 78ea30de6689a3c33045cd504fe5f175b94b413c..459faf9fe7ba596a51b80f2b49734d937c3b8cdd 100644 --- a/drivers/input/misc/qti-haptics.c +++ b/drivers/input/misc/qti-haptics.c @@ -839,7 +839,7 @@ static int qti_haptics_upload_effect(struct input_dev *dev, if (hrtimer_active(&chip->hap_disable_timer)) { rem = hrtimer_get_remaining(&chip->hap_disable_timer); time_us = ktime_to_us(rem); - dev_dbg(chip->dev, "waiting for playing clear sequence: %ld us\n", + 
dev_dbg(chip->dev, "waiting for playing clear sequence: %lld us\n", time_us); usleep_range(time_us, time_us + 100); } @@ -1157,7 +1157,8 @@ static enum hrtimer_restart qti_hap_disable_timer(struct hrtimer *timer) rc = qti_haptics_module_en(chip, false); if (rc < 0) - dev_err(chip->dev, "Disable haptics module failed\n", rc); + dev_err(chip->dev, "Disable haptics module failed, rc=%d\n", + rc); return HRTIMER_NORESTART; } @@ -1519,7 +1520,7 @@ static int wf_repeat_n_dbgfs_write(void *data, u64 val) break; if (i == ARRAY_SIZE(wf_repeat)) - pr_err("wf_repeat value %lu is invalid\n", val); + pr_err("wf_repeat value %llu is invalid\n", val); else effect->wf_repeat_n = i; @@ -1545,7 +1546,7 @@ static int wf_s_repeat_n_dbgfs_write(void *data, u64 val) break; if (i == ARRAY_SIZE(wf_s_repeat)) - pr_err("wf_s_repeat value %lu is invalid\n", val); + pr_err("wf_s_repeat value %llu is invalid\n", val); else effect->wf_s_repeat_n = i; diff --git a/drivers/input/touchscreen/hxchipset/himax_common.c b/drivers/input/touchscreen/hxchipset/himax_common.c index fe81162b05f509d78002c874f3306940a6e92e43..d350f1c19a11dff926abe3e4c090720a23a8b09f 100644 --- a/drivers/input/touchscreen/hxchipset/himax_common.c +++ b/drivers/input/touchscreen/hxchipset/himax_common.c @@ -1676,7 +1676,7 @@ static void himax_finger_leave(struct himax_ts_data *ts) #endif for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) { - if (((ts->pre_finger_mask >> loop_i) & 1) == 1) { + if (((ts->old_finger >> loop_i) & 1) == 1) { input_mt_slot(ts->input_dev, loop_i); input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0); } diff --git a/drivers/input/touchscreen/hxchipset/himax_common.h b/drivers/input/touchscreen/hxchipset/himax_common.h index 6d48fe782ce7e85e37f75109810a94ab634302bb..dc88efb0d57785e529be285b9650e321d4484060 100644 --- a/drivers/input/touchscreen/hxchipset/himax_common.h +++ b/drivers/input/touchscreen/hxchipset/himax_common.h @@ -143,6 +143,7 @@ #define HX_FINGER_LEAVE 2 #define 
HX_REPORT_SZ 128 +#define HX_CMD_BYTE 1 enum HX_TS_PATH { HX_REPORT_COORD = 1, diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.c b/drivers/input/touchscreen/hxchipset/himax_platform.c index a32b7ccfd26b7d67cb3ea41d263c786e4df20e41..353f064f2731726ff9de96a51879603dcca46483 100644 --- a/drivers/input/touchscreen/hxchipset/himax_platform.c +++ b/drivers/input/touchscreen/hxchipset/himax_platform.c @@ -178,29 +178,32 @@ int himax_bus_read(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRe { .addr = client->addr, .flags = 0, - .len = 1, - .buf = &command, + .len = HX_CMD_BYTE, + .buf = buf, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, - .buf = buf, + .buf = buf + HX_CMD_BYTE, } }; - if (length > HX_REPORT_SZ * 2) { - I("%s: data length too large %d\n", __func__, length); - buf = kmalloc(length, GFP_KERNEL); + if (length > HX_REPORT_SZ) { + W("%s: data length too large %d!\n", __func__, length); + buf = kmalloc(length + HX_CMD_BYTE, GFP_KERNEL); if (!buf) { - E("%s: failed realloc buf %d\n", __func__, length); + E("%s: failed realloc buf %d\n", __func__, + length + HX_CMD_BYTE); return -EIO; } reallocate = true; - msg[1].buf = buf; + msg[0].buf = buf; + msg[1].buf = buf + HX_CMD_BYTE; } mutex_lock(&ts->rw_lock); + buf[0] = command; for (retry = 0; retry < toRetry; retry++) { if (i2c_transfer(client->adapter, msg, 2) == 2) @@ -216,7 +219,7 @@ int himax_bus_read(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRe return -EIO; } - memcpy(data, buf, length); + memcpy(data, buf + HX_CMD_BYTE, length); mutex_unlock(&ts->rw_lock); if (reallocate) @@ -236,16 +239,17 @@ int himax_bus_write(uint8_t command, uint8_t *data, uint32_t length, uint8_t toR { .addr = client->addr, .flags = 0, - .len = length + 1, + .len = length + HX_CMD_BYTE, .buf = buf, } }; - if (length + 1 > HX_REPORT_SZ * 2) { - I("%s: data length too large %d\n", __func__, length + 1); - buf = kmalloc(length + 1, GFP_KERNEL); + if (length > HX_REPORT_SZ) { + 
W("%s: data length too large %d!\n", __func__, length); + buf = kmalloc(length + HX_CMD_BYTE, GFP_KERNEL); if (!buf) { - E("%s: failed realloc buf %d\n", __func__, length + 1); + E("%s: failed realloc buf %d\n", __func__, + length + HX_CMD_BYTE); return -EIO; } reallocate = true; @@ -253,7 +257,7 @@ int himax_bus_write(uint8_t command, uint8_t *data, uint32_t length, uint8_t toR mutex_lock(&ts->rw_lock); buf[0] = command; - memcpy(buf + 1, data, length); + memcpy(buf + HX_CMD_BYTE, data, length); for (retry = 0; retry < toRetry; retry++) { if (i2c_transfer(client->adapter, msg, 1) == 1) @@ -298,8 +302,8 @@ int himax_bus_master_write(uint8_t *data, uint32_t length, uint8_t toRetry) } }; - if (length > HX_REPORT_SZ * 2) { - I("%s: data length too large %d\n", __func__, length); + if (length > HX_REPORT_SZ) { + W("%s: data length too large %d!\n", __func__, length); buf = kmalloc(length, GFP_KERNEL); if (!buf) { E("%s: failed realloc buf %d\n", __func__, length); @@ -797,7 +801,7 @@ int himax_chip_common_probe(struct i2c_client *client, const struct i2c_device_i mutex_init(&ts->rw_lock); private_ts = ts; - ts->report_i2c_data = kmalloc(HX_REPORT_SZ * 2, GFP_KERNEL); + ts->report_i2c_data = kmalloc(HX_REPORT_SZ + HX_CMD_BYTE, GFP_KERNEL); if (ts->report_i2c_data == NULL) { E("%s: allocate report_i2c_data failed\n", __func__); ret = -ENOMEM; diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index 8b72ae870a7781e5e247f75ce24bc84dc0fe1839..e5d3542c75ef680cb36289595043c3795393beb7 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -2791,6 +2791,11 @@ static void fts_enter_pointer_event_handler(struct fts_ts_info *info, if (z == 0) z = 10; + if (info->bdata->x_flip) + x = X_AXIS_MAX - x; + if (info->bdata->y_flip) + y = Y_AXIS_MAX - y; + if (x == X_AXIS_MAX) x--; @@ -3229,13 +3234,13 @@ static int cx_crc_check(void) { unsigned char regAdd1[3] = {FTS_CMD_HW_REG_R, ADDR_CRC_BYTE0, ADDR_CRC_BYTE1}; - 
unsigned char val[2]; + unsigned char val[2] = {0}; unsigned char crc_status; int res; u8 cmd[4] = { FTS_CMD_HW_REG_W, 0x00, 0x00, SYSTEM_RESET_VALUE }; int event_to_search[2] = {(int)EVENTID_ERROR_EVENT, (int)EVENT_TYPE_CHECKSUM_ERROR}; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; /* read 2 bytes because the first one is a dummy byte! */ res = fts_readCmd(regAdd1, sizeof(regAdd1), val, 2); @@ -3252,7 +3257,6 @@ static int cx_crc_check(void) return crc_status; } - logError(0, "%s %s: Verifying if Config CRC Error...\n", tag, __func__); u16ToU8_be(SYSTEM_RESET_ADDRESS, &cmd[1]); res = fts_writeCmd(cmd, 4); @@ -3283,7 +3287,7 @@ static void fts_fw_update_auto(struct work_struct *work) u8 cmd[4] = { FTS_CMD_HW_REG_W, 0x00, 0x00, SYSTEM_RESET_VALUE }; int event_to_search[2] = {(int)EVENTID_ERROR_EVENT, (int)EVENT_TYPE_CHECKSUM_ERROR}; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int flag_init = 0; int retval = 0; int retval1 = 0; @@ -4347,6 +4351,9 @@ static int parse_dt(struct device *dev, bdata->reset_gpio = GPIO_NOT_DEFINED; } + bdata->x_flip = of_property_read_bool(np, "st,x-flip"); + bdata->y_flip = of_property_read_bool(np, "st,y-flip"); + return OK; } diff --git a/drivers/input/touchscreen/st/fts.h b/drivers/input/touchscreen/st/fts.h index cf2a2196d024719612f6499c0a03e55732b15019..ed2ec0a4f2857176edb85535e7cc4f00d862c30a 100644 --- a/drivers/input/touchscreen/st/fts.h +++ b/drivers/input/touchscreen/st/fts.h @@ -183,6 +183,8 @@ extern struct mutex gestureMask_mutex; #endif struct fts_i2c_platform_data { + bool x_flip; + bool y_flip; int (*power)(bool on); int irq_gpio; int reset_gpio; diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c index dc5d3661d02ad9524d50275b686418c092704cf8..513c44dd31f9e08e04fc6814ac0fa637a0116cc1 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c @@ -235,7 +235,7 
@@ int flashProcedure(const char *path, int force, int keep_cx) int flash_status(void) { u8 cmd[2] = {FLASH_CMD_READSTATUS, 0x00}; - u8 readData; + u8 readData = 0; logError(0, "%s %s:Reading ...\n", tag, __func__); if (fts_readCmd(cmd, 2, &readData, FLASH_STATUS_BYTES) < 0) { @@ -589,7 +589,7 @@ int flash_burn(Firmware fw, int force_burn, int keep_cx) int wait_for_flash_ready(u8 type) { u8 cmd[2] = {FLASH_CMD_READ_REGISTER, type}; - u8 readData; + u8 readData = 0; int i, res = -1; logError(0, "%s Waiting for flash ready ...\n", tag); diff --git a/drivers/input/touchscreen/st/fts_lib/ftsGesture.c b/drivers/input/touchscreen/st/fts_lib/ftsGesture.c index 77075b4be2a5c1dadf588c4405796dc12566d560..1fa58e4c455b579c13a8e7f4b64444a633ecbdc2 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsGesture.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsGesture.c @@ -110,7 +110,7 @@ int updateGestureMask(u8 *mask, int size, int en) int enableGesture(u8 *mask, int size) { u8 cmd[GESTURE_MASK_SIZE + 2]; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int i, res; int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, 0x00, GESTURE_ENABLE }; @@ -175,7 +175,7 @@ int enableGesture(u8 *mask, int size) int disableGesture(u8 *mask, int size) { u8 cmd[2 + GESTURE_MASK_SIZE]; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; u8 temp; int i, res; int event_to_search[4] = { EVENTID_GESTURE, @@ -246,7 +246,7 @@ int startAddCustomGesture(u8 gestureID) { u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GESTURE_START_ADD, gestureID }; int res; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GESTURE_START_ADD }; @@ -279,7 +279,7 @@ int finishAddCustomGesture(u8 gestureID) u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GESTURE_FINISH_ADD, gestureID }; int res; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int event_to_search[4] = { EVENTID_GESTURE, 
EVENT_TYPE_ENB, gestureID, GESTURE_FINISH_ADD }; @@ -316,7 +316,7 @@ int loadCustomGesture(u8 *template, u8 gestureID) u8 cmd[TEMPLATE_CHUNK + 5]; int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GESTURE_DATA_ADD }; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; logError(0, "%s Starting adding custom gesture procedure...\n", tag); @@ -512,7 +512,7 @@ int removeCustomGesture(u8 gestureID) u8 cmd[3] = { FTS_CMD_GESTURE_CMD, GETURE_REMOVE_CUSTOM, gestureID }; int event_to_search[4] = { EVENTID_GESTURE, EVENT_TYPE_ENB, gestureID, GETURE_REMOVE_CUSTOM }; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; index = gestureID - GESTURE_CUSTOM_OFFSET; diff --git a/drivers/input/touchscreen/st/fts_lib/ftsTest.c b/drivers/input/touchscreen/st/fts_lib/ftsTest.c index c29e5ed598e308fb22cd797118d3b132b7f04d48..59f3ac3284ffe846fc8e9380009cc123e2e87c8d 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsTest.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsTest.c @@ -364,7 +364,7 @@ int production_test_ito(void) { int res = OK; u8 cmd; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; //look for ito event int eventToSearch[2] = {EVENTID_ERROR_EVENT, EVENT_TYPE_ITO}; @@ -417,7 +417,7 @@ int production_test_initialization(void) { int res; u8 cmd; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_FULL_INITIALIZATION}; @@ -475,7 +475,7 @@ int ms_compensation_tuning(void) { int res; u8 cmd; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_MS_TUNING_CMPL}; @@ -515,7 +515,7 @@ int ss_compensation_tuning(void) { int res; u8 cmd; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_SS_TUNING_CMPL}; @@ -556,7 +556,7 @@ int lp_timer_calibration(void) { int res; u8 cmd; - u8 
readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_LPTIMER_TUNING_CMPL}; @@ -599,7 +599,7 @@ int save_cx_tuning(void) { int res; u8 cmd; - u8 readData[FIFO_EVENT_SIZE]; + u8 readData[FIFO_EVENT_SIZE] = {0}; int eventToSearch[2] = {EVENTID_STATUS_UPDATE, EVENT_TYPE_COMP_DATA_SAVED}; diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c index 1ddd0d40f71f9e00d428f1d4d1141ce38714b6a9..740ccfe5d3071debdc9dc96829d13ea87ead17c1 100644 --- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c +++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c @@ -47,6 +47,7 @@ #endif #include +#include #define INPUT_PHYS_NAME "synaptics_dsx/touch_input" #define STYLUS_PHYS_NAME "synaptics_dsx/stylus" @@ -147,7 +148,7 @@ static int synaptics_rmi4_suspend(struct device *dev); static int synaptics_rmi4_resume(struct device *dev); -static int synaptics_rmi4_defer_probe(struct platform_device *pdev); +static void synaptics_rmi4_defer_probe(struct work_struct *work); static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -4287,22 +4288,57 @@ static int synaptics_rmi4_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s: Failed to register fb notifier client\n", __func__); + goto err_drm_reg; } #endif + + rmi4_data->rmi4_probe_wq = create_singlethread_workqueue( + "Synaptics_rmi4_probe_wq"); + if (!rmi4_data->rmi4_probe_wq) { + dev_err(&pdev->dev, + "%s: Failed to create probe workqueue\n", + __func__); + goto err_probe_wq; + } + INIT_WORK(&rmi4_data->rmi4_probe_work, synaptics_rmi4_defer_probe); + queue_work(rmi4_data->rmi4_probe_wq, &rmi4_data->rmi4_probe_work); + + return retval; + +err_probe_wq: +#ifdef CONFIG_FB + msm_drm_unregister_client(&rmi4_data->fb_notifier); +#endif + +err_drm_reg: + kfree(rmi4_data); + return retval; } 
-static int synaptics_rmi4_defer_probe(struct platform_device *pdev) +static void synaptics_rmi4_defer_probe(struct work_struct *work) { int retval; - struct synaptics_rmi4_data *rmi4_data; + unsigned char attr_count; + struct synaptics_rmi4_data *rmi4_data = container_of(work, + struct synaptics_rmi4_data, rmi4_probe_work); + struct platform_device *pdev; const struct synaptics_dsx_hw_interface *hw_if; const struct synaptics_dsx_board_data *bdata; - unsigned char attr_count; - rmi4_data = platform_get_drvdata(pdev); + pdev = rmi4_data->pdev; hw_if = rmi4_data->hw_if; bdata = hw_if->board_data; + + init_completion(&rmi4_data->drm_init_done); + retval = wait_for_completion_interruptible(&rmi4_data->drm_init_done); + if (retval < 0) { + dev_err(&pdev->dev, + "%s: Wait for DRM init was interrupted\n", + __func__); + goto err_drm_init_wait; + } + retval = synaptics_rmi4_get_reg(rmi4_data, true); if (retval < 0) { dev_err(&pdev->dev, @@ -4439,8 +4475,9 @@ static int synaptics_rmi4_defer_probe(struct platform_device *pdev) INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work); queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work); #endif + rmi4_data->initialized = true; - return retval; + return; err_sysfs: for (attr_count--; attr_count >= 0; attr_count--) { @@ -4458,10 +4495,6 @@ static int synaptics_rmi4_defer_probe(struct platform_device *pdev) synaptics_rmi4_irq_enable(rmi4_data, false, false); err_enable_irq: -#ifdef CONFIG_FB - msm_drm_unregister_client(&rmi4_data->fb_notifier); -#endif - #ifdef USE_EARLYSUSPEND unregister_early_suspend(&rmi4_data->early_suspend); #endif @@ -4492,13 +4525,12 @@ static int synaptics_rmi4_defer_probe(struct platform_device *pdev) devm_pinctrl_put(rmi4_data->ts_pinctrl); rmi4_data->ts_pinctrl = NULL; } else { - retval = pinctrl_select_state( - rmi4_data->ts_pinctrl, - rmi4_data->pinctrl_state_release); - if (retval) + if (pinctrl_select_state( + rmi4_data->ts_pinctrl, + rmi4_data->pinctrl_state_release)) 
dev_err(&pdev->dev, - "%s: Failed to create sysfs attributes\n", - __func__); + "%s: Failed to select %s pinstate\n", + __func__, PINCTRL_STATE_RELEASE); } } @@ -4506,9 +4538,15 @@ static int synaptics_rmi4_defer_probe(struct platform_device *pdev) synaptics_rmi4_get_reg(rmi4_data, false); err_get_reg: +err_drm_init_wait: +#ifdef CONFIG_FB + msm_drm_unregister_client(&rmi4_data->fb_notifier); +#endif + cancel_work_sync(&rmi4_data->rmi4_probe_work); + destroy_workqueue(rmi4_data->rmi4_probe_wq); kfree(rmi4_data); - return retval; + return; } static int synaptics_rmi4_remove(struct platform_device *pdev) @@ -4583,6 +4621,9 @@ static int synaptics_rmi4_remove(struct platform_device *pdev) synaptics_rmi4_enable_reg(rmi4_data, false); synaptics_rmi4_get_reg(rmi4_data, false); + cancel_work_sync(&rmi4_data->rmi4_probe_work); + destroy_workqueue(rmi4_data->rmi4_probe_wq); + kfree(rmi4_data); return 0; @@ -4605,18 +4646,16 @@ static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self, if (event == MSM_DRM_EVENT_BLANK) { transition = *(int *)evdata->data; if (transition == MSM_DRM_BLANK_POWERDOWN) { - if (!rmi4_data->initialized) - return -ECANCELED; - synaptics_rmi4_suspend(&rmi4_data->pdev->dev); + if (rmi4_data->initialized) + synaptics_rmi4_suspend( + &rmi4_data->pdev->dev); rmi4_data->fb_ready = false; } else if (transition == MSM_DRM_BLANK_UNBLANK) { - if (!rmi4_data->initialized) { - if (synaptics_rmi4_defer_probe( - rmi4_data->pdev)) - return -ECANCELED; - rmi4_data->initialized = true; - } - synaptics_rmi4_resume(&rmi4_data->pdev->dev); + if (rmi4_data->initialized) + synaptics_rmi4_resume( + &rmi4_data->pdev->dev); + else + complete(&rmi4_data->drm_init_done); rmi4_data->fb_ready = true; } } diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h index 423db092f9767e68c76844d8b2ed3e110355f97a..519a30b0504185904dec27f608f21cd81251e968 100644 --- 
a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h +++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h @@ -370,6 +370,9 @@ struct synaptics_rmi4_data { struct mutex rmi4_irq_enable_mutex; struct delayed_work rb_work; struct workqueue_struct *rb_workqueue; + struct work_struct rmi4_probe_work; + struct workqueue_struct *rmi4_probe_wq; + struct completion drm_init_done; struct pinctrl *ts_pinctrl; struct pinctrl_state *pinctrl_state_active; struct pinctrl_state *pinctrl_state_suspend; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ea26ce6a66845bd5afd0c78db008a09f948da191..bdc5e032b7831ed2fa98241fdec5796c5e5871f3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -5432,6 +5432,9 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu) data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1); smmu->archdata = data; + if (arm_smmu_is_static_cb(smmu)) + return 0; + ret = qsmmuv500_read_actlr_tbl(smmu); if (ret) return ret; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index c458e26743e78bb5a102f7e3a92628272ebaa72c..3b1d92402d375891ba7b175fa47a05ffa6121d52 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -451,7 +451,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, ret_iova = (dma_addr_t)iova << shift; - if (guard_len && + if (ret_iova && guard_len && iommu_map(domain, ret_iova + size, page_to_phys(cookie->guard_page), guard_len, ARM_SMMU_GUARD_PROT)) { diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c index 9fe93ce8d0aec451c87e5fa1ff22eee86658c4ac..f008234a3930bc82ec9756511d012ba01e196d90 100644 --- a/drivers/iommu/iommu-debug.c +++ b/drivers/iommu/iommu-debug.c @@ -297,6 +297,7 @@ static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev, } } + domain->is_debug_domain = true; if (iommu_attach_group(domain, dev->iommu_group)) { seq_puts(s, "Couldn't attach new domain to 
device. Is it already attached?\n"); @@ -551,6 +552,7 @@ static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s, goto out_release_mapping; } + mapping->domain->is_debug_domain = true; if (arm_iommu_attach_device(dev, mapping)) { seq_puts(s, "fast_smmu_attach_device failed\n"); goto out_release_mapping; @@ -1173,6 +1175,7 @@ static int __apply_to_new_mapping(struct seq_file *s, goto out_release_mapping; } + mapping->domain->is_debug_domain = true; if (arm_iommu_attach_device(dev, mapping)) goto out_release_mapping; @@ -1239,6 +1242,7 @@ static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s, if (!mapping) goto out; + mapping->domain->is_debug_domain = true; if (arm_iommu_attach_device(dev, mapping)) goto out_release_mapping; diff --git a/drivers/irqchip/qcom/pdc-sm6150.c b/drivers/irqchip/qcom/pdc-sm6150.c index b05be12f5c78c1790148d0f385abc94884c4ac87..c2d3b7266c132649ae7beaf6ea43d2b3e095e0ef 100644 --- a/drivers/irqchip/qcom/pdc-sm6150.c +++ b/drivers/irqchip/qcom/pdc-sm6150.c @@ -109,8 +109,8 @@ static struct pdc_pin sm6150_data[] = { {91, 603},/*core_bi_px_gpio_94*/ {92, 604},/*core_bi_px_gpio_84*/ {93, 605},/*core_bi_px_gpio_102*/ - {94, 641},/*core_bi_px_gpio_98*/ - {95, 642},/*core_bi_px_gpio_99*/ + {94, 641},/*core_bi_px_gpio_99*/ + {95, 642},/*core_bi_px_gpio_98*/ {96, 643},/*core_bi_px_gpio_105*/ {97, 644},/*core_bi_px_gpio_107*/ {98, 645},/*gp_irq_hvm[68]*/ @@ -140,7 +140,7 @@ static struct pdc_pin sm6150_data[] = { {122, 669},/*core_bi_px_gpio_89*/ {123, 670},/*core_bi_px_gpio_51*/ {124, 671},/*core_bi_px_gpio_88*/ - {125, 95},/*core_bi_px_gpio_39*/ + {125, 672},/*core_bi_px_gpio_39*/ {-1}, }; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 38a5bb764c7b55cb8b742639e49756e413b4ab26..598724ffde4eaeb370c6b1ca49de027b8583fe2d 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1640,13 +1640,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) } else return 
-EINVAL; case IIOCDBGVAR: - if (arg) { - if (copy_to_user(argp, &dev, sizeof(ulong))) - return -EFAULT; - return 0; - } else - return -EINVAL; - break; + return -EINVAL; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c index 4798689f73cbf19757aec1dc9575637e87d36f49..8090b62aff8baba1cb155b51281e431cfc782311 100644 --- a/drivers/leds/leds-qti-tri-led.c +++ b/drivers/leds/leds-qti-tri-led.c @@ -44,14 +44,14 @@ #define PWM_PERIOD_DEFAULT_NS 1000000 struct pwm_setting { - u32 pre_period_ns; - u32 period_ns; - u32 duty_ns; + u64 pre_period_ns; + u64 period_ns; + u64 duty_ns; }; struct led_setting { - u32 on_ms; - u32 off_ms; + u64 on_ms; + u64 off_ms; enum led_brightness brightness; bool blink; bool breath; @@ -218,24 +218,16 @@ static int __tri_led_set(struct qpnp_led_dev *led) static int qpnp_tri_led_set(struct qpnp_led_dev *led) { - u32 on_ms, off_ms, period_ns, duty_ns; + u64 on_ms, off_ms, period_ns, duty_ns; enum led_brightness brightness = led->led_setting.brightness; int rc = 0; if (led->led_setting.blink) { on_ms = led->led_setting.on_ms; off_ms = led->led_setting.off_ms; - if (on_ms > INT_MAX / NSEC_PER_MSEC) - duty_ns = INT_MAX - 1; - else - duty_ns = on_ms * NSEC_PER_MSEC; - if (on_ms + off_ms > INT_MAX / NSEC_PER_MSEC) { - period_ns = INT_MAX; - duty_ns = (period_ns / (on_ms + off_ms)) * on_ms; - } else { - period_ns = (on_ms + off_ms) * NSEC_PER_MSEC; - } + duty_ns = on_ms * NSEC_PER_MSEC; + period_ns = (on_ms + off_ms) * NSEC_PER_MSEC; if (period_ns < duty_ns && duty_ns != 0) period_ns = duty_ns + 1; @@ -245,15 +237,14 @@ static int qpnp_tri_led_set(struct qpnp_led_dev *led) if (brightness == LED_OFF) duty_ns = 0; - else if (period_ns > INT_MAX / brightness) - duty_ns = (period_ns / LED_FULL) * brightness; - else - duty_ns = (period_ns * brightness) / LED_FULL; + + duty_ns = period_ns * brightness; + do_div(duty_ns, 
LED_FULL); if (period_ns < duty_ns && duty_ns != 0) period_ns = duty_ns + 1; } - dev_dbg(led->chip->dev, "PWM settings for %s led: period = %dns, duty = %dns\n", + dev_dbg(led->chip->dev, "PWM settings for %s led: period = %lluns, duty = %lluns\n", led->cdev.name, period_ns, duty_ns); led->pwm_setting.duty_ns = duty_ns; diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 888c9020ca067a9334904ff73bb3c50170fb6879..623ff1d4396ba3aacd3f29a3851f51622348411e 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -573,6 +573,15 @@ config DM_ZONED If unsure, say N. +config DM_VERITY_AVB + tristate "Support AVB specific verity error behavior" + depends on DM_VERITY + ---help--- + Enables Android Verified Boot platform-specific error + behavior. In particular, it will modify the vbmeta partition + specified on the kernel command-line when non-transient error + occurs (followed by a panic). + config DM_ANDROID_VERITY bool "Android verity target support" depends on BLK_DEV_DM=y diff --git a/drivers/md/Makefile b/drivers/md/Makefile index bfd027659aafe63ba22adaeede557c0e600ac873..fb2e9a64378bbc1ec5a5b0599ca730b75f29966b 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -74,6 +74,10 @@ ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif +ifeq ($(CONFIG_DM_VERITY_AVB),y) +dm-verity-objs += dm-verity-avb.o +endif + ifeq ($(CONFIG_DM_ANDROID_VERITY),y) dm-verity-objs += dm-android-verity.o endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f575110454b6a2dd7df499e07fe15141184a7d06..98181028c7e9b4b1b3b6725d1f195c8c544bff10 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -125,7 +125,8 @@ struct iv_tcw_private { * and encrypts / decrypts at the same time. 
*/ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, + DM_CRYPT_ENCRYPT_OVERRIDE }; enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ @@ -2653,6 +2654,8 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; } else if (!strcasecmp(opt_string, "iv_large_sectors")) set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + else if (!strcasecmp(opt_string, "allow_encrypt_override")) + set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags); else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -2862,12 +2865,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct crypt_config *cc = ti->private; /* - * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. + * If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD, + * just bypass crypt queues. * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ - if (unlikely(bio->bi_opf & REQ_PREFLUSH || - bio_op(bio) == REQ_OP_DISCARD)) { + if (unlikely(bio->bi_opf & REQ_PREFLUSH) || + (unlikely(bio->bi_opf & REQ_NOENCRYPT) && + test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) || + bio_op(bio) == REQ_OP_DISCARD) { bio_set_dev(bio, cc->dev->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + @@ -2954,6 +2960,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, + &cc->flags); if (cc->on_disk_tag_size) num_feature_args++; if (num_feature_args) { @@ -2970,6 +2978,8 @@ static void crypt_status(struct dm_target *ti, 
status_type_t type, DMEMIT(" sector_size:%d", cc->sector_size); if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) DMEMIT(" iv_large_sectors"); + if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) + DMEMIT(" allow_encrypt_override"); } break; diff --git a/drivers/md/dm-verity-avb.c b/drivers/md/dm-verity-avb.c new file mode 100644 index 0000000000000000000000000000000000000000..a9f102aa379e9d5eb9dacf6484cacc78028a832f --- /dev/null +++ b/drivers/md/dm-verity-avb.c @@ -0,0 +1,229 @@ +/* + * Copyright (C) 2017 Google. + * + * This file is released under the GPLv2. + * + * Based on drivers/md/dm-verity-chromeos.c + */ + +#include +#include +#include + +#define DM_MSG_PREFIX "verity-avb" + +/* Set via module parameters. */ +static char avb_vbmeta_device[64]; +static char avb_invalidate_on_error[4]; + +static void invalidate_vbmeta_endio(struct bio *bio) +{ + if (bio->bi_status) + DMERR("invalidate_vbmeta_endio: error %d", bio->bi_status); + complete(bio->bi_private); +} + +static int invalidate_vbmeta_submit(struct bio *bio, + struct block_device *bdev, + int op, int access_last_sector, + struct page *page) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + bio->bi_private = &wait; + bio->bi_end_io = invalidate_vbmeta_endio; + bio_set_dev(bio, bdev); + bio_set_op_attrs(bio, op, REQ_SYNC); + + bio->bi_iter.bi_sector = 0; + if (access_last_sector) { + sector_t last_sector; + + last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1; + bio->bi_iter.bi_sector = last_sector; + } + if (!bio_add_page(bio, page, PAGE_SIZE, 0)) { + DMERR("invalidate_vbmeta_submit: bio_add_page error"); + return -EIO; + } + + submit_bio(bio); + /* Wait up to 2 seconds for completion or fail. */ + if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000))) + return -EIO; + return 0; +} + +static int invalidate_vbmeta(dev_t vbmeta_devt) +{ + int ret = 0; + struct block_device *bdev; + struct bio *bio; + struct page *page; + fmode_t dev_mode; + /* Ensure we do synchronous unblocked I/O. 
We may also need + * sync_bdev() on completion, but it really shouldn't. + */ + int access_last_sector = 0; + + DMINFO("invalidate_vbmeta: acting on device %d:%d", + MAJOR(vbmeta_devt), MINOR(vbmeta_devt)); + + /* First we open the device for reading. */ + dev_mode = FMODE_READ | FMODE_EXCL; + bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode, + invalidate_vbmeta); + if (IS_ERR(bdev)) { + DMERR("invalidate_kernel: could not open device for reading"); + dev_mode = 0; + ret = -ENOENT; + goto failed_to_read; + } + + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) { + ret = -ENOMEM; + goto failed_bio_alloc; + } + + page = alloc_page(GFP_NOIO); + if (!page) { + ret = -ENOMEM; + goto failed_to_alloc_page; + } + + access_last_sector = 0; + ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ, + access_last_sector, page); + if (ret) { + DMERR("invalidate_vbmeta: error reading"); + goto failed_to_submit_read; + } + + /* We have a page. Let's make sure it looks right. */ + if (memcmp("AVB0", page_address(page), 4) == 0) { + /* Stamp it. */ + memcpy(page_address(page), "AVE0", 4); + DMINFO("invalidate_vbmeta: found vbmeta partition"); + } else { + /* Could be this is on a AVB footer, check. Also, since the + * AVB footer is in the last 64 bytes, adjust for the fact that + * we're dealing with 512-byte sectors. + */ + size_t offset = (1<bi_remaining. + */ + bio_reset(bio); + + ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_WRITE, + access_last_sector, page); + if (ret) { + DMERR("invalidate_vbmeta: error writing"); + goto failed_to_submit_write; + } + + DMERR("invalidate_vbmeta: completed."); + ret = 0; +failed_to_submit_write: +failed_to_write: +invalid_header: + __free_page(page); +failed_to_submit_read: + /* Technically, we'll leak a page with the pending bio, but + * we're about to reboot anyway. 
+ */ +failed_to_alloc_page: + bio_put(bio); +failed_bio_alloc: + if (dev_mode) + blkdev_put(bdev, dev_mode); +failed_to_read: + return ret; +} + +void dm_verity_avb_error_handler(void) +{ + dev_t dev; + + DMINFO("AVB error handler called for %s", avb_vbmeta_device); + + if (strcmp(avb_invalidate_on_error, "yes") != 0) { + DMINFO("Not configured to invalidate"); + return; + } + + if (avb_vbmeta_device[0] == '\0') { + DMERR("avb_vbmeta_device parameter not set"); + goto fail_no_dev; + } + + dev = name_to_dev_t(avb_vbmeta_device); + if (!dev) { + DMERR("No matching partition for device: %s", + avb_vbmeta_device); + goto fail_no_dev; + } + + invalidate_vbmeta(dev); + +fail_no_dev: + ; +} + +static int __init dm_verity_avb_init(void) +{ + DMINFO("AVB error handler initialized with vbmeta device: %s", + avb_vbmeta_device); + return 0; +} + +static void __exit dm_verity_avb_exit(void) +{ +} + +module_init(dm_verity_avb_init); +module_exit(dm_verity_avb_exit); + +MODULE_AUTHOR("David Zeuthen "); +MODULE_DESCRIPTION("AVB-specific error handler for dm-verity"); +MODULE_LICENSE("GPL"); + +/* Declare parameter with no module prefix */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "androidboot.vbmeta." 
+module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0); +module_param_string(invalidate_on_error, avb_invalidate_on_error, + sizeof(avb_invalidate_on_error), 0); diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 1742af24fd9cfecdfc5b11cd85df8463c9b6acbb..0227c462f3c8cc99c9ba1e7a56103af714760fe8 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -276,8 +276,12 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, if (v->mode == DM_VERITY_MODE_LOGGING) return 0; - if (v->mode == DM_VERITY_MODE_RESTART) + if (v->mode == DM_VERITY_MODE_RESTART) { +#ifdef CONFIG_DM_VERITY_AVB + dm_verity_avb_error_handler(); +#endif kernel_restart("dm-verity device corrupted"); + } return 1; } diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index e80e06aa5ec6876f8dd2596c4a485157b97cd111..116e91fa50d77d1a54143913baf3749d06156ca2 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -142,4 +142,5 @@ extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits); extern void verity_dtr(struct dm_target *ti); extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv); extern int verity_map(struct dm_target *ti, struct bio *bio); +extern void dm_verity_avb_error_handler(void); #endif /* DM_VERITY_H */ diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c index cddbd8309ece7bd5b1736b4532cac5ccae87b92c..13a653ae0399eb623ceeb1f8831f238e037efc06 100644 --- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c +++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c @@ -624,7 +624,8 @@ static void cam_hw_cdm_work(struct work_struct *work) } static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain, - struct device *dev, unsigned long iova, int flags, void *token) + struct device *dev, unsigned long iova, int flags, void 
*token, + uint32_t buf_info) { struct cam_hw_info *cdm_hw = NULL; struct cam_cdm *core = NULL; @@ -910,7 +911,7 @@ int cam_hw_cdm_probe(struct platform_device *pdev) CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed"); goto unlock_release_mem; } - cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, + cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, cam_hw_cdm_iommu_fault_handler, cdm_hw); rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH); @@ -1034,7 +1035,7 @@ int cam_hw_cdm_probe(struct platform_device *pdev) flush_workqueue(cdm_core->work_queue); destroy_workqueue(cdm_core->work_queue); destroy_non_secure_hdl: - cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, + cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, NULL, cdm_hw); if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure)) CAM_ERR(CAM_CDM, "Release iommu secure hdl failed"); @@ -1106,8 +1107,8 @@ int cam_hw_cdm_remove(struct platform_device *pdev) if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure)) CAM_ERR(CAM_CDM, "Release iommu secure hdl failed"); - cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure, - NULL, cdm_hw); + cam_smmu_unset_client_page_fault_handler( + cdm_core->iommu_hdl.non_secure, cdm_hw); mutex_destroy(&cdm_hw->hw_mutex); kfree(cdm_hw->soc_info.soc_private); diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c index 20d3ecf13d3e5d861a9eabe555333fcfd269cc06..24e93f0ad4451795ebf4f6dcb7ca5b5208abb9cb 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c @@ -44,6 +44,7 @@ int cam_context_shutdown(struct cam_context *ctx) int rc = 0; int32_t ctx_hdl = ctx->dev_hdl; + mutex_lock(&ctx->ctx_mutex); if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) { rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev( 
ctx, NULL); @@ -56,6 +57,7 @@ int cam_context_shutdown(struct cam_context *ctx) if (rc < 0) CAM_ERR(CAM_CORE, "Error while dev release %d", rc); } + mutex_unlock(&ctx->ctx_mutex); if (!rc) rc = cam_destroy_device_hdl(ctx_hdl); @@ -223,6 +225,27 @@ int cam_context_handle_crm_process_evt(struct cam_context *ctx, return rc; } +int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova, + uint32_t buf_info) +{ + int rc = 0; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (ctx->state_machine[ctx->state].pagefault_ops) { + rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova, + buf_info); + } else { + CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d", + ctx->dev_hdl, ctx->state); + } + + return rc; +} + int cam_context_handle_acquire_dev(struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd) { @@ -264,6 +287,36 @@ int cam_context_handle_acquire_dev(struct cam_context *ctx, return rc; } +int cam_context_handle_acquire_hw(struct cam_context *ctx, + void *args) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!args) { + CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw) { + rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw( + ctx, args); + } else { + CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d", + ctx->dev_name, ctx->state); + rc = -EPROTO; + } + + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + int cam_context_handle_release_dev(struct cam_context *ctx, struct cam_release_dev_cmd *cmd) { @@ -293,6 +346,35 @@ int cam_context_handle_release_dev(struct cam_context *ctx, return rc; } +int cam_context_handle_release_hw(struct cam_context *ctx, + void *args) +{ + int rc; + + if (!ctx->state_machine) { + CAM_ERR(CAM_CORE, "Context is not ready"); + return -EINVAL; + } + + if (!args) { 
+ CAM_ERR(CAM_CORE, "Invalid release HW command payload"); + return -EINVAL; + } + + mutex_lock(&ctx->ctx_mutex); + if (ctx->state_machine[ctx->state].ioctl_ops.release_hw) { + rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw( + ctx, args); + } else { + CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d", + ctx->dev_name, ctx->state); + rc = -EPROTO; + } + mutex_unlock(&ctx->ctx_mutex); + + return rc; +} + int cam_context_handle_flush_dev(struct cam_context *ctx, struct cam_flush_dev_cmd *cmd) { diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h index ffceea22ae0f426fd8893942e7c61c5deff8855f..003fea6a0df1676425a03bb8e3d54c30247b4e77 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context.h +++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h @@ -57,23 +57,25 @@ enum cam_context_state { * @num_out_acked: Number of out fence acked * @flushed: Request is flushed * @ctx: The context to which this request belongs + * @pf_data page fault debug data * */ struct cam_ctx_request { - struct list_head list; - uint32_t status; - uint64_t request_id; + struct list_head list; + uint32_t status; + uint64_t request_id; void *req_priv; - struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX]; - uint32_t num_hw_update_entries; - struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX]; - uint32_t num_in_map_entries; - struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX]; - uint32_t num_out_map_entries; - atomic_t num_in_acked; - uint32_t num_out_acked; - int flushed; - struct cam_context *ctx; + struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX]; + uint32_t num_hw_update_entries; + struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX]; + uint32_t num_in_map_entries; + struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX]; + uint32_t num_out_map_entries; + atomic_t num_in_acked; + uint32_t num_out_acked; + int flushed; + 
struct cam_context *ctx; + struct cam_hw_mgr_dump_pf_data pf_data; }; /** @@ -85,6 +87,8 @@ struct cam_ctx_request { * @start_dev: Function pointer for start device * @stop_dev: Function pointer for stop device * @flush_dev: Function pointer for flush device + * @acquire_hw: Function pointer for acquire hw + * @release_hw: Function pointer for release hw * */ struct cam_ctx_ioctl_ops { @@ -100,6 +104,8 @@ struct cam_ctx_ioctl_ops { struct cam_start_stop_dev_cmd *cmd); int (*flush_dev)(struct cam_context *ctx, struct cam_flush_dev_cmd *cmd); + int (*acquire_hw)(struct cam_context *ctx, void *args); + int (*release_hw)(struct cam_context *ctx, void *args); }; /** @@ -135,12 +141,14 @@ struct cam_ctx_crm_ops { * @ioctl_ops: Ioctl funciton table * @crm_ops: CRM to context interface function table * @irq_ops: Hardware event handle function + * @pagefault_ops: Function to be called on page fault * */ struct cam_ctx_ops { struct cam_ctx_ioctl_ops ioctl_ops; struct cam_ctx_crm_ops crm_ops; cam_hw_event_cb_func irq_ops; + cam_hw_pagefault_cb_func pagefault_ops; }; /** @@ -291,6 +299,19 @@ int cam_context_handle_crm_flush_req(struct cam_context *ctx, int cam_context_handle_crm_process_evt(struct cam_context *ctx, struct cam_req_mgr_link_evt_data *process_evt); +/** + * cam_context_dump_pf_info() + * + * @brief: Handle dump active request request command + * + * @ctx: Object pointer for cam_context + * @iova: Page fault address + * @buf_info: Information about closest memory handle + * + */ +int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova, + uint32_t buf_info); + /** * cam_context_handle_acquire_dev() * @@ -303,6 +324,18 @@ int cam_context_handle_crm_process_evt(struct cam_context *ctx, int cam_context_handle_acquire_dev(struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd); +/** + * cam_context_handle_acquire_hw() + * + * @brief: Handle acquire HW command + * + * @ctx: Object pointer for cam_context + * @cmd: Acquire HW command payload + * + */ 
+int cam_context_handle_acquire_hw(struct cam_context *ctx, + void *cmd); + /** * cam_context_handle_release_dev() * @@ -315,6 +348,18 @@ int cam_context_handle_acquire_dev(struct cam_context *ctx, int cam_context_handle_release_dev(struct cam_context *ctx, struct cam_release_dev_cmd *cmd); +/** + * cam_context_handle_release_hw() + * + * @brief: Handle release HW command + * + * @ctx: Object pointer for cam_context + * @cmd: Release HW command payload + * + */ +int cam_context_handle_release_hw(struct cam_context *ctx, + void *cmd); + /** * cam_context_handle_config_dev() * diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c index 6c2383ed811005558a3de010a7afd631ca592746..d78203a9609d23a387dfbc95be0e927ba2750f0b 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c @@ -337,6 +337,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx, cfg.out_map_entries = req->out_map_entries; cfg.max_in_map_entries = CAM_CTX_CFG_MAX; cfg.in_map_entries = req->in_map_entries; + cfg.pf_data = &(req->pf_data); rc = ctx->hw_mgr_intf->hw_prepare_update( ctx->hw_mgr_intf->hw_mgr_priv, &cfg); @@ -905,3 +906,38 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx) end: return rc; } + +int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx, + struct cam_packet *packet, unsigned long iova, uint32_t buf_info, + bool *mem_found) +{ + int rc = 0; + struct cam_hw_cmd_args cmd_args; + + if (!ctx) { + CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx); + rc = -EINVAL; + goto end; + } + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready", + ctx->dev_name, ctx->ctx_id); + rc = -EFAULT; + goto end; + } + + if (ctx->hw_mgr_intf->hw_cmd) { + cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map; + cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO; + 
cmd_args.u.pf_args.pf_data.packet = packet; + cmd_args.u.pf_args.iova = iova; + cmd_args.u.pf_args.buf_info = buf_info; + cmd_args.u.pf_args.mem_found = mem_found; + ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &cmd_args); + } + +end: + return rc; +} diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h index ca22b50cf5fa2dd67c765caf4b23b3dc209056c5..43e69405ee3b769cf8510330dc24768a8493dcb1 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h +++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h @@ -31,5 +31,8 @@ int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx, int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx); int32_t cam_context_flush_req_to_hw(struct cam_context *ctx, struct cam_flush_dev_cmd *cmd); +int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx, + struct cam_packet *packet, unsigned long iova, uint32_t buf_info, + bool *mem_found); #endif /* _CAM_CONTEXT_UTILS_H_ */ diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h index f7990b6d5d4a4f2b77539a794641d1f6d982b3d6..f427a4dac6d2495d684772319bd08b440729eb31 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h +++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h @@ -29,6 +29,10 @@ typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id, void *evt_data); +/* hardware page fault callback function type */ +typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova, + uint32_t buf_info); + /** * struct cam_hw_update_entry - Entry for hardware config * @@ -89,6 +93,7 @@ struct cam_hw_acquire_args { void *context_data; cam_hw_event_cb_func event_cb; uint32_t num_acq; + uint32_t acquire_info_size; uint64_t acquire_info; void *ctxt_to_hw_map; }; @@ -131,6 +136,16 @@ struct cam_hw_stop_args { void *args; 
}; + +/** + * struct cam_hw_mgr_dump_pf_data - page fault debug data + * + * packet: pointer to packet + */ +struct cam_hw_mgr_dump_pf_data { + void *packet; +}; + /** * struct cam_hw_prepare_update_args - Payload for prepare command * @@ -146,6 +161,7 @@ struct cam_hw_stop_args { * @in_map_entries: Actual input fence mapping list (returned) * @num_in_map_entries: Number of acutal input fence mapping (returned) * @priv: Private pointer of hw update + * @pf_data: Debug data for page fault * */ struct cam_hw_prepare_update_args { @@ -161,6 +177,7 @@ struct cam_hw_prepare_update_args { struct cam_hw_fence_map_entry *in_map_entries; uint32_t num_in_map_entries; void *priv; + struct cam_hw_mgr_dump_pf_data *pf_data; }; /** @@ -206,6 +223,48 @@ struct cam_hw_flush_args { enum flush_type_t flush_type; }; +/** + * struct cam_hw_dump_pf_args - Payload for dump pf info command + * + * @pf_data: Debug data for page fault + * @iova: Page fault address + * @buf_info: Info about memory buffer where page + * fault occurred + * @mem_found: If fault memory found in current + * request + * + */ +struct cam_hw_dump_pf_args { + struct cam_hw_mgr_dump_pf_data pf_data; + unsigned long iova; + uint32_t buf_info; + bool *mem_found; +}; + +/* enum cam_hw_mgr_command - Hardware manager command type */ +enum cam_hw_mgr_command { + CAM_HW_MGR_CMD_INTERNAL, + CAM_HW_MGR_CMD_DUMP_PF_INFO, +}; + +/** + * struct cam_hw_cmd_args - Payload for hw manager command + * + * @ctxt_to_hw_map: HW context from the acquire + * @cmd_type HW command type + * @internal_args Arguments for internal command + * @pf_args Arguments for Dump PF info command + * + */ +struct cam_hw_cmd_args { + void *ctxt_to_hw_map; + uint32_t cmd_type; + union { + void *internal_args; + struct cam_hw_dump_pf_args pf_args; + } u; +}; + /** * cam_hw_mgr_intf - HW manager interface * diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c index 
f0dc8b500b490f03881d902c0907d906a48ca88f..6584868e418ffdec16555b63018f33770d4c7883 100644 --- a/drivers/media/platform/msm/camera/cam_core/cam_node.c +++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c @@ -128,6 +128,45 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node, return rc; } +static int __cam_node_handle_acquire_hw_v1(struct cam_node *node, + struct cam_acquire_hw_cmd_v1 *acquire) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!acquire) + return -EINVAL; + + if (acquire->dev_handle <= 0) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return -EINVAL; + } + + if (acquire->session_handle <= 0) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return -EINVAL; + } + + ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d", + acquire->dev_handle); + return -EINVAL; + } + + rc = cam_context_handle_acquire_hw(ctx, acquire); + if (rc) { + CAM_ERR(CAM_CORE, "Acquire device failed for node %s", + node->name); + return rc; + } + + CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d", + node->name, ctx->ctx_id); + + return 0; +} + static int __cam_node_handle_start_dev(struct cam_node *node, struct cam_start_stop_dev_cmd *start) { @@ -313,6 +352,43 @@ static int __cam_node_handle_release_dev(struct cam_node *node, return rc; } +static int __cam_node_handle_release_hw_v1(struct cam_node *node, + struct cam_release_hw_cmd_v1 *release) +{ + int rc = 0; + struct cam_context *ctx = NULL; + + if (!release) + return -EINVAL; + + if (release->dev_handle <= 0) { + CAM_ERR(CAM_CORE, "Invalid device handle for context"); + return -EINVAL; + } + + if (release->session_handle <= 0) { + CAM_ERR(CAM_CORE, "Invalid session handle for context"); + return -EINVAL; + } + + ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle); + if (!ctx) { + CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s", + release->dev_handle, 
node->name); + return -EINVAL; + } + + rc = cam_context_handle_release_hw(ctx, release); + if (rc) + CAM_ERR(CAM_CORE, "context release failed node %s", node->name); + + CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d", + node->name, ctx->ctx_id, + atomic_read(&(ctx->refcount.refcount.refs))); + + return rc; +} + static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info) { struct cam_context *ctx = NULL; @@ -430,6 +506,9 @@ int cam_node_shutdown(struct cam_node *node) for (i = 0; i < node->ctx_size; i++) { if (node->ctx_list[i].dev_hdl > 0) { + CAM_DBG(CAM_CORE, + "Node [%s] invoking shutdown on context [%d]", + node->name, i); rc = cam_context_shutdown(&(node->ctx_list[i])); if (rc) continue; @@ -539,6 +618,56 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd) rc = -EFAULT; break; } + case CAM_ACQUIRE_HW: { + uint32_t api_version; + void *acquire_ptr = NULL; + size_t acquire_size; + + if (copy_from_user(&api_version, (void __user *)cmd->handle, + sizeof(api_version))) { + rc = -EFAULT; + break; + } + + if (api_version == 1) { + acquire_size = sizeof(struct cam_acquire_hw_cmd_v1); + } else { + CAM_ERR(CAM_CORE, "Unsupported api version %d", + api_version); + rc = -EINVAL; + break; + } + + acquire_ptr = kzalloc(acquire_size, GFP_KERNEL); + if (!acquire_ptr) { + CAM_ERR(CAM_CORE, "No memory for acquire HW"); + rc = -ENOMEM; + break; + } + + if (copy_from_user(acquire_ptr, (void __user *)cmd->handle, + acquire_size)) { + rc = -EFAULT; + goto acquire_kfree; + } + + if (api_version == 1) { + rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr); + if (rc) { + CAM_ERR(CAM_CORE, + "acquire device failed(rc = %d)", rc); + goto acquire_kfree; + } + } + + if (copy_to_user((void __user *)cmd->handle, acquire_ptr, + acquire_size)) + rc = -EFAULT; + +acquire_kfree: + kfree(acquire_ptr); + break; + } case CAM_START_DEV: { struct cam_start_stop_dev_cmd start; @@ -595,6 +724,50 @@ int cam_node_handle_ioctl(struct cam_node *node, 
struct cam_control *cmd) } break; } + case CAM_RELEASE_HW: { + uint32_t api_version; + size_t release_size; + void *release_ptr = NULL; + + if (copy_from_user(&api_version, (void __user *)cmd->handle, + sizeof(api_version))) { + rc = -EFAULT; + break; + } + + if (api_version == 1) { + release_size = sizeof(struct cam_release_hw_cmd_v1); + } else { + CAM_ERR(CAM_CORE, "Unsupported api version %d", + api_version); + rc = -EINVAL; + break; + } + + release_ptr = kzalloc(release_size, GFP_KERNEL); + if (!release_ptr) { + CAM_ERR(CAM_CORE, "No memory for release HW"); + rc = -ENOMEM; + break; + } + + if (copy_from_user(release_ptr, (void __user *)cmd->handle, + release_size)) { + rc = -EFAULT; + goto release_kfree; + } + + if (api_version == 1) { + rc = __cam_node_handle_release_hw_v1(node, release_ptr); + if (rc) + CAM_ERR(CAM_CORE, + "release device failed(rc = %d)", rc); + } + +release_kfree: + kfree(release_ptr); + break; + } case CAM_FLUSH_REQ: { struct cam_flush_dev_cmd flush; diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c index 502c95d4c60ecd888e78940b7f66a584c059aa07..522a602883eb157900d1e25acc77322f0b6ccf8f 100644 --- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c +++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c @@ -25,9 +25,46 @@ #include "cam_mem_mgr.h" #include "cam_trace.h" #include "cam_debug_util.h" +#include "cam_packet_util.h" static const char icp_dev_name[] = "icp"; +static int cam_icp_context_dump_active_request(void *data, unsigned long iova, + uint32_t buf_info) +{ + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL; + int rc = 0; + bool b_mem_found = false; + + if (!ctx) { + CAM_ERR(CAM_ICP, "Invalid ctx"); + return -EINVAL; + } + + CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d", + 
ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + pf_dbg_entry = &(req->pf_data); + CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet, + iova, buf_info, &b_mem_found); + if (rc) + CAM_ERR(CAM_ICP, "Failed to dump pf info"); + + if (b_mem_found) + CAM_ERR(CAM_ICP, "Found page fault in req %lld %d", + req->request_id, rc); + } + + return rc; +} + static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd) { @@ -156,6 +193,7 @@ static struct cam_ctx_ops }, .crm_ops = {}, .irq_ops = __cam_icp_handle_buf_done_in_ready, + .pagefault_ops = cam_icp_context_dump_active_request, }, /* Ready */ { @@ -167,12 +205,14 @@ static struct cam_ctx_ops }, .crm_ops = {}, .irq_ops = __cam_icp_handle_buf_done_in_ready, + .pagefault_ops = cam_icp_context_dump_active_request, }, /* Activated */ { .ioctl_ops = {}, .crm_ops = {}, .irq_ops = NULL, + .pagefault_ops = cam_icp_context_dump_active_request, }, }; diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c index fa714c8c7fea02f5cdd2158f5fdeaffb5ca07ea5..699ad5f2a0b4b3edf3718bb48f8272eb4bb72c2f 100644 --- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c +++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c @@ -35,6 +35,7 @@ #include "cam_hw_mgr_intf.h" #include "cam_icp_hw_mgr_intf.h" #include "cam_debug_util.h" +#include "cam_smmu_api.h" #define CAM_ICP_DEV_NAME "cam-icp" @@ -55,6 +56,25 @@ static const struct of_device_id cam_icp_dt_match[] = { {} }; +static void cam_icp_dev_iommu_fault_handler( + struct iommu_domain *domain, struct device *dev, unsigned long iova, + int flags, void *token, uint32_t buf_info) +{ + int i = 0; + struct cam_node *node = NULL; + + if (!token) { + CAM_ERR(CAM_ICP, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node 
*)token; + + for (i = 0; i < node->ctx_size; i++) + cam_context_dump_pf_info(&(node->ctx_list[i]), iova, + buf_info); +} + static int cam_icp_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { @@ -96,7 +116,7 @@ static int cam_icp_subdev_close(struct v4l2_subdev *sd, mutex_lock(&g_icp_dev.icp_lock); if (g_icp_dev.open_cnt <= 0) { - CAM_ERR(CAM_ICP, "ICP subdev is already closed"); + CAM_DBG(CAM_ICP, "ICP subdev is already closed"); rc = -EINVAL; goto end; } @@ -135,6 +155,7 @@ static int cam_icp_probe(struct platform_device *pdev) int rc = 0, i = 0; struct cam_node *node; struct cam_hw_mgr_intf *hw_mgr_intf; + int iommu_hdl = -1; if (!pdev) { CAM_ERR(CAM_ICP, "pdev is NULL"); @@ -158,7 +179,8 @@ static int cam_icp_probe(struct platform_device *pdev) goto hw_alloc_fail; } - rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf); + rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf, + &iommu_hdl); if (rc) { CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc); goto hw_init_fail; @@ -181,6 +203,9 @@ static int cam_icp_probe(struct platform_device *pdev) goto ctx_fail; } + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_icp_dev_iommu_fault_handler, node); + g_icp_dev.open_cnt = 0; mutex_init(&g_icp_dev.icp_lock); diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c index f91e29a8f2d97c68ee67dae04b1bd0106db9a3a6..cd44d834dd0654cb8bf1aa8895e46cd889915b2f 100644 --- a/drivers/media/platform/msm/camera/cam_icp/hfi.c +++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c @@ -533,6 +533,11 @@ void cam_hfi_disable_cpu(void __iomem *icp_base) val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET); cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET); + + cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET, + icp_base + HFI_REG_HOST_ICP_INIT_REQUEST); + cam_io_w_mb((uint32_t)INTR_DISABLE, + g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN); } void cam_hfi_enable_cpu(void __iomem 
*icp_base) @@ -883,11 +888,6 @@ void cam_hfi_deinit(void __iomem *icp_base) g_hfi->cmd_q_state = false; g_hfi->msg_q_state = false; - cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET, - icp_base + HFI_REG_HOST_ICP_INIT_REQUEST); - - cam_io_w_mb((uint32_t)INTR_DISABLE, - g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN); kzfree(g_hfi); g_hfi = NULL; diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c index d01637436a51a312fcb5f602c4287fab9927fba2..e73d538063832e3018968090a9c42f4d25908222 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c @@ -199,8 +199,10 @@ static int cam_bps_handle_resume(struct cam_hw_info *bps_dev) cam_cpas_reg_read(core_info->cpas_handle, CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl); if (pwr_ctrl & BPS_COLLAPSE_MASK) { - CAM_ERR(CAM_ICP, "BPS: pwr_ctrl(%x)", pwr_ctrl); - return -EINVAL; + CAM_WARN(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REG_CPASTOP, + hw_info->pwr_ctrl, true, 0); } rc = cam_bps_transfer_gdsc_control(soc_info); diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c index f4d63ca0244282706219938be97bde5f935cbed1..ce0389306c0a8737be9a9dae9e139b7a2e6b7ff5 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c @@ -2681,7 +2681,6 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args) cam_icp_free_hfi_mem(); hw_mgr->fw_download = false; - hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE; CAM_DBG(CAM_ICP, "Exit"); return rc; @@ -3389,10 +3388,11 @@ static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr, 
prepare_args->num_out_map_entries++; } CAM_DBG(CAM_REQ, - "ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u", + "ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x", ctx_data->ctx_id, packet->header.request_id, i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence, - io_cfg_ptr[i].resource_type); + io_cfg_ptr[i].resource_type, + io_cfg_ptr[i].mem_handle[0]); } if (prepare_args->num_in_map_entries > 1) @@ -3594,6 +3594,77 @@ static int cam_icp_mgr_update_hfi_frame_process( return rc; } +static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info, + bool *mem_found) +{ + uint64_t iova_addr; + size_t src_buf_size; + int i; + int j; + int rc = 0; + int32_t mmu_hdl; + + struct cam_buf_io_cfg *io_cfg = NULL; + + if (mem_found) + *mem_found = false; + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload + + packet->io_configs_offset / 4); + + for (i = 0; i < packet->num_io_configs; i++) { + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) == + GET_FD_FROM_HANDLE(pf_buf_info)) { + CAM_INFO(CAM_ICP, + "Found PF at port: %d mem %x fd: %x", + io_cfg[i].resource_type, + io_cfg[i].mem_handle[j], + pf_buf_info); + if (mem_found) + *mem_found = true; + } + + CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d", + io_cfg[i].resource_type, + io_cfg[i].fence, + io_cfg[i].format, + io_cfg[i].direction); + + mmu_hdl = cam_mem_is_secure_buf( + io_cfg[i].mem_handle[j]) ? 
sec_mmu_hdl : + iommu_hdl; + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], + mmu_hdl, &iova_addr, &src_buf_size); + if (rc < 0) { + CAM_ERR(CAM_UTIL, "get src buf address fail"); + continue; + } + if (iova_addr >> 32) { + CAM_ERR(CAM_ICP, "Invalid mapped address"); + rc = -EINVAL; + continue; + } + + CAM_INFO(CAM_ICP, + "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x", + j, io_cfg[i].planes[j].width, + io_cfg[i].planes[j].height, + (int32_t)src_buf_size, + (unsigned int)iova_addr, + io_cfg[i].offsets[j], + io_cfg[i].mem_handle[j]); + + iova_addr += io_cfg[i].offsets[j]; + + } + } +} + static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv, void *prepare_hw_update_args) { @@ -3636,6 +3707,8 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv, return rc; } + prepare_args->pf_data->packet = packet; + CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u", packet->header.request_id, ctx_data->ctx_id); /* Update Buffer Address from handles and patch information */ @@ -3940,7 +4013,6 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args) CAM_DBG(CAM_ICP, "Last Release"); cam_icp_mgr_icp_power_collapse(hw_mgr); cam_icp_hw_mgr_reset_clk_info(hw_mgr); - hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE; rc = cam_ipe_bps_deint(hw_mgr); } mutex_unlock(&hw_mgr->hw_mgr_mutex); @@ -4072,47 +4144,29 @@ static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr, return -EFAULT; } - if (!hw_mgr->ctxt_cnt) { - hw_mgr->secure_mode = icp_dev_acquire_info.secure_mode; - } else { - if (hw_mgr->secure_mode != icp_dev_acquire_info.secure_mode) { - CAM_ERR(CAM_ICP, - "secure mode mismatch driver:%d, context:%d", - hw_mgr->secure_mode, - icp_dev_acquire_info.secure_mode); - return -EINVAL; - } - } - acquire_size = sizeof(struct cam_icp_acquire_dev_info) + ((icp_dev_acquire_info.num_out_res - 1) * sizeof(struct cam_icp_res_info)); ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL); - if (!ctx_data->icp_dev_acquire_info) { - if 
(!hw_mgr->ctxt_cnt) - hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE; + if (!ctx_data->icp_dev_acquire_info) return -ENOMEM; - } if (copy_from_user(ctx_data->icp_dev_acquire_info, (void __user *)args->acquire_info, acquire_size)) { CAM_ERR(CAM_ICP, "Failed in acquire: size = %d", acquire_size); - if (!hw_mgr->ctxt_cnt) - hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE; kfree(ctx_data->icp_dev_acquire_info); ctx_data->icp_dev_acquire_info = NULL; return -EFAULT; } - CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x %u", + CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x", ctx_data->icp_dev_acquire_info->dev_type, ctx_data->icp_dev_acquire_info->in_res.format, ctx_data->icp_dev_acquire_info->in_res.width, ctx_data->icp_dev_acquire_info->in_res.height, ctx_data->icp_dev_acquire_info->in_res.fps, ctx_data->icp_dev_acquire_info->num_out_res, - ctx_data->icp_dev_acquire_info->scratch_mem_size, - hw_mgr->secure_mode); + ctx_data->icp_dev_acquire_info->scratch_mem_size); p_icp_out = ctx_data->icp_dev_acquire_info->out_res; for (i = 0; i < icp_dev_acquire_info.num_out_res; i++) @@ -4520,8 +4574,8 @@ static int cam_icp_mgr_create_wq(void) if (rc) goto debugfs_create_failed; - icp_hw_mgr.icp_pc_flag = false; - icp_hw_mgr.ipe_bps_pc_flag = false; + icp_hw_mgr.icp_pc_flag = true; + icp_hw_mgr.ipe_bps_pc_flag = true; for (i = 0; i < ICP_WORKQ_NUM_TASK; i++) icp_hw_mgr.msg_work->task.pool[i].payload = @@ -4552,7 +4606,35 @@ static int cam_icp_mgr_create_wq(void) return rc; } -int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl) +static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_ICP, "Invalid arguments"); + return -EINVAL; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_icp_mgr_print_io_bufs( + hw_cmd_args->u.pf_args.pf_data.packet, + hw_mgr->iommu_hdl, + 
hw_mgr->iommu_sec_hdl, + hw_cmd_args->u.pf_args.buf_info, + hw_cmd_args->u.pf_args.mem_found); + break; + default: + CAM_ERR(CAM_ICP, "Invalid cmd"); + } + + return rc; +} + +int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl, + int *iommu_hdl) { int i, rc = 0; struct cam_hw_mgr_intf *hw_mgr_intf; @@ -4575,6 +4657,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl) hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u; hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u; hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush; + hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd; icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE; mutex_init(&icp_hw_mgr.hw_mgr_mutex); @@ -4618,6 +4701,9 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl) if (rc) goto icp_wq_create_failed; + if (iommu_hdl) + *iommu_hdl = icp_hw_mgr.iommu_hdl; + init_completion(&icp_hw_mgr.a5_complete); return rc; diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h index 771c4ed7c55cbbf9c0968582f96bcd443298e93b..7bb9b9ed18a2dc0a24ff8f25b19a0c6f0c6f00af 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h @@ -28,7 +28,7 @@ #define CPAS_IPE1_BIT 0x2000 int cam_icp_hw_mgr_init(struct device_node *of_node, - uint64_t *hw_mgr_hdl); + uint64_t *hw_mgr_hdl, int *iommu_hdl); /** * struct cam_icp_cpas_vote diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c index 620a4bd4943b290d5bfad96bcdb23d1617f94e9d..21035c74acc7091cd85d32402d8e872e337ae84d 100644 --- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c +++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c @@ -195,9 +195,12 @@ static int 
cam_ipe_handle_resume(struct cam_hw_info *ipe_dev) CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl); if (pwr_ctrl & IPE_COLLAPSE_MASK) { - CAM_ERR(CAM_ICP, "IPE: resume failed : %d", pwr_ctrl); - return -EINVAL; + CAM_WARN(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl); + cam_cpas_reg_write(core_info->cpas_handle, + CAM_CPAS_REG_CPASTOP, + hw_info->pwr_ctrl, true, 0); } + rc = cam_ipe_transfer_gdsc_control(soc_info); cam_cpas_reg_read(core_info->cpas_handle, CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl); diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c index 63d4bc7d7c22789579340734e5a7558b943f0758..99ff87890b36e88c0b5670f19fd2428ee1536eb5 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c @@ -22,6 +22,8 @@ #include "cam_req_mgr_dev.h" #include "cam_trace.h" #include "cam_debug_util.h" +#include "cam_packet_util.h" +#include "cam_context_utils.h" static const char isp_dev_name[] = "isp"; @@ -29,6 +31,9 @@ static const char isp_dev_name[] = "isp"; (atomic64_add_return(1, head) % \ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) +static int cam_isp_context_dump_active_request(void *data, unsigned long iova, + uint32_t buf_info); + static void __cam_isp_ctx_update_state_monitor_array( struct cam_isp_context *ctx_isp, enum cam_isp_state_change_trigger trigger_type, @@ -376,7 +381,7 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state( continue; } - if (!bubble_state) { + if (!req_isp->bubble_detected) { CAM_DBG(CAM_ISP, "Sync with success: req %lld res 0x%x fd 0x%x", req->request_id, @@ -403,15 +408,14 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state( } else { /* * Ignore the buffer done if bubble detect is on - * In most case, active list should be empty when - * bubble detects. But for safety, we just move the - * current active request to the pending list here. 
+ * Increment the ack number here, and queue the + * request back to pending list whenever all the + * buffers are done. */ + req_isp->num_acked++; CAM_DBG(CAM_ISP, "buf done with bubble state %d recovery %d", bubble_state, req_isp->bubble_report); - list_del_init(&req->list); - list_add(&req->list, &ctx->pending_req_list); continue; } @@ -432,10 +436,25 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state( req_isp->num_fence_map_out); WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out); } - if (req_isp->num_acked == req_isp->num_fence_map_out) { + + if (req_isp->num_acked != req_isp->num_fence_map_out) + return rc; + + ctx_isp->active_req_cnt--; + + if (req_isp->bubble_detected && req_isp->bubble_report) { + req_isp->num_acked = 0; + req_isp->bubble_detected = false; + list_del_init(&req->list); + list_add(&req->list, &ctx->pending_req_list); + + CAM_DBG(CAM_REQ, + "Move active request %lld to pending list(cnt = %d) [bubble recovery]", + req->request_id, ctx_isp->active_req_cnt); + } else { list_del_init(&req->list); list_add_tail(&req->list, &ctx->free_req_list); - ctx_isp->active_req_cnt--; + CAM_DBG(CAM_REQ, "Move active request %lld to free list(cnt = %d) [all fences done]", req->request_id, ctx_isp->active_req_cnt); @@ -733,15 +752,13 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report); if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { struct cam_req_mgr_error_notify notify; - list_del_init(&req->list); - list_add(&req->list, &ctx->pending_req_list); - notify.link_hdl = ctx->link_hdl; notify.dev_hdl = ctx->dev_hdl; notify.req_id = req->request_id; @@ -750,18 +767,19 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp, CAM_DBG(CAM_ISP, 
"Notify CRM about Bubble frame %lld", ctx_isp->frame_id); } else { - /* - * Since can not bubble report, always move the request to - * active list. - */ - list_del_init(&req->list); - list_add_tail(&req->list, &ctx->active_req_list); - ctx_isp->active_req_cnt++; - CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)", - req->request_id, ctx_isp->active_req_cnt); req_isp->bubble_report = 0; } + /* + * Always move the request to active list. Let buf done + * function handles the rest. + */ + CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)", + req->request_id, ctx_isp->active_req_cnt); + ctx_isp->active_req_cnt++; + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + if (req->request_id > ctx_isp->reported_req_id) { request_id = req->request_id; ctx_isp->reported_req_id = request_id; @@ -883,13 +901,12 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); req_isp = (struct cam_isp_ctx_req *)req->req_priv; - list_del_init(&req->list); + req_isp->bubble_detected = true; if (req_isp->bubble_report && ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) { struct cam_req_mgr_error_notify notify; - list_add(&req->list, &ctx->pending_req_list); notify.link_hdl = ctx->link_hdl; notify.dev_hdl = ctx->dev_hdl; notify.req_id = req->request_id; @@ -899,17 +916,19 @@ static int __cam_isp_ctx_epoch_in_bubble_applied( "Notify CRM about Bubble req_id %llu frame %lld", req->request_id, ctx_isp->frame_id); } else { - /* - * If we can not report bubble, then treat it as if no bubble - * report. Just move the req to active list. - */ - list_add_tail(&req->list, &ctx->active_req_list); - ctx_isp->active_req_cnt++; - CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)", - req->request_id, ctx_isp->active_req_cnt); req_isp->bubble_report = 0; } + /* + * Always move the request to active list. Let buf done + * function handles the rest. 
+ */ + CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)", + req->request_id, ctx_isp->active_req_cnt); + ctx_isp->active_req_cnt++; + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + if (!req_isp->bubble_report) { if (req->request_id > ctx_isp->reported_req_id) { request_id = req->request_id; @@ -1410,6 +1429,7 @@ static int __cam_isp_ctx_flush_req_in_top_state( CAM_DBG(CAM_ISP, "try to flush active list"); rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list, flush_req); + ctx_isp->active_req_cnt = 0; spin_unlock_bh(&ctx->lock); /* Start hw */ @@ -1608,12 +1628,12 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied( CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx", ctx_isp->frame_id, ctx_isp->sof_timestamp_val); - if (list_empty(&ctx->pending_req_list)) { + if (list_empty(&ctx->wait_req_list)) { /* * If no pending req in epoch, this is an error case. * The recovery is to go back to sof state */ - CAM_ERR(CAM_ISP, "No pending request"); + CAM_ERR(CAM_ISP, "No wait request"); ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF; /* Send SOF event as empty frame*/ @@ -1623,9 +1643,10 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied( goto end; } - req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request, + req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request, list); req_isp = (struct cam_isp_ctx_req *)req->req_priv; + req_isp->bubble_detected = true; CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report); if (req_isp->bubble_report && ctx->ctx_crm_intf && @@ -1640,18 +1661,19 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied( CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld", ctx_isp->frame_id); } else { - /* - * Since can not bubble report, always move the request to - * active list. 
- */ - list_del_init(&req->list); - list_add_tail(&req->list, &ctx->active_req_list); - ctx_isp->active_req_cnt++; - CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)", - req->request_id, ctx_isp->active_req_cnt); req_isp->bubble_report = 0; } + /* + * Always move the request to active list. Let buf done + * function handles the rest. + */ + ctx_isp->active_req_cnt++; + list_del_init(&req->list); + list_add_tail(&req->list, &ctx->active_req_list); + CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)", + req->request_id, ctx_isp->active_req_cnt); + if (!req_isp->bubble_report) { if (req->request_id > ctx_isp->reported_req_id) { request_id = req->request_id; @@ -1962,6 +1984,11 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx, (struct cam_isp_context *) ctx->ctx_priv; struct cam_req_mgr_flush_request flush_req; + if (cmd && ctx_isp->hw_ctx && ctx_isp->split_acquire) { + CAM_ERR(CAM_ISP, "ctx expects release HW before release dev"); + return rc; + } + if (ctx_isp->hw_ctx) { rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx; ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, @@ -1976,6 +2003,8 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx, ctx_isp->frame_id = 0; ctx_isp->active_req_cnt = 0; ctx_isp->reported_req_id = 0; + ctx_isp->hw_acquired = false; + ctx_isp->init_received = false; /* * Ideally, we should never have any active request here. 
@@ -2001,6 +2030,54 @@ static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx, return rc; } +static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx, + void *cmd) +{ + int rc = 0; + struct cam_hw_release_args rel_arg; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_req_mgr_flush_request flush_req; + + if (ctx_isp->hw_ctx) { + rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx; + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &rel_arg); + ctx_isp->hw_ctx = NULL; + } else { + CAM_ERR(CAM_ISP, "No hw resources acquired for this ctx"); + } + + ctx_isp->frame_id = 0; + ctx_isp->active_req_cnt = 0; + ctx_isp->reported_req_id = 0; + ctx_isp->hw_acquired = false; + ctx_isp->init_received = false; + + /* + * Ideally, we should never have any active request here. + * But we still add some sanity check code here to help the debug + */ + if (!list_empty(&ctx->active_req_list)) + CAM_WARN(CAM_ISP, "Active list is not empty"); + + /* Flush all the pending request list */ + flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL; + flush_req.link_hdl = ctx->link_hdl; + flush_req.dev_hdl = ctx->dev_hdl; + + CAM_DBG(CAM_ISP, "try to flush pending list"); + spin_lock_bh(&ctx->lock); + rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req); + spin_unlock_bh(&ctx->lock); + ctx->state = CAM_CTX_ACQUIRED; + + trace_cam_context_state("ISP", ctx); + CAM_DBG(CAM_ISP, "Release device success[%u] next state %d", + ctx->ctx_id, ctx->state); + return rc; +} + static int __cam_isp_ctx_config_dev_in_top_state( struct cam_context *ctx, struct cam_config_dev_cmd *cmd) { @@ -2065,6 +2142,7 @@ static int __cam_isp_ctx_config_dev_in_top_state( cfg.out_map_entries = req_isp->fence_map_out; cfg.in_map_entries = req_isp->fence_map_in; cfg.priv = &req_isp->hw_update_data; + cfg.pf_data = &(req->pf_data); CAM_DBG(CAM_ISP, "try to prepare config packet......"); @@ -2079,6 +2157,7 @@ static int 
__cam_isp_ctx_config_dev_in_top_state( req_isp->num_fence_map_out = cfg.num_out_map_entries; req_isp->num_fence_map_in = cfg.num_in_map_entries; req_isp->num_acked = 0; + req_isp->bubble_detected = false; for (i = 0; i < req_isp->num_fence_map_out; i++) { rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id); @@ -2106,6 +2185,7 @@ static int __cam_isp_ctx_config_dev_in_top_state( rc = __cam_isp_ctx_enqueue_init_request(ctx, req); if (rc) CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed"); + ctx_isp->init_received = true; } else { rc = -EINVAL; CAM_ERR(CAM_ISP, "Recevied INIT pkt in wrong state"); @@ -2163,7 +2243,8 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, struct cam_hw_release_args release; struct cam_isp_context *ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; - struct cam_isp_hw_cmd_args hw_cmd_args; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; if (!ctx->hw_mgr_intf) { CAM_ERR(CAM_ISP, "HW interface is not ready"); @@ -2176,6 +2257,12 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, cmd->session_handle, cmd->num_resources, cmd->handle_type, cmd->resource_hdl); + if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) { + ctx_isp->split_acquire = true; + CAM_DBG(CAM_ISP, "Acquire dev handle"); + goto get_dev_handle; + } + if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) { CAM_ERR(CAM_ISP, "Too much resources in the acquire"); rc = -ENOMEM; @@ -2220,7 +2307,9 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, /* Query the context has rdi only resource */ hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; - hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args); if (rc) { @@ -2228,7 
+2317,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, goto free_hw; } - if (hw_cmd_args.u.is_rdi_only_context) { + if (isp_hw_cmd_args.u.is_rdi_only_context) { /* * this context has rdi only resource assign rdi only * state machine @@ -2247,8 +2336,16 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, cam_isp_ctx_activated_state_machine; } - ctx_isp->rdi_only_context = hw_cmd_args.u.is_rdi_only_context; + ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context; ctx_isp->hw_ctx = param.ctxt_to_hw_map; + ctx_isp->hw_acquired = true; + ctx_isp->split_acquire = false; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + + kfree(isp_res); + isp_res = NULL; + +get_dev_handle: req_hdl_param.session_hdl = cmd->session_handle; /* bridge is not ready for these flags. so false for now */ @@ -2268,31 +2365,178 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx, /* store session information */ ctx->session_hdl = cmd->session_handle; - ctx->state = CAM_CTX_ACQUIRED; trace_cam_context_state("ISP", ctx); CAM_DBG(CAM_ISP, - "Acquire success on session_hdl 0x%x num_rsrces %d RDI only %d ctx %u", - cmd->session_handle, cmd->num_resources, - (hw_cmd_args.u.is_rdi_only_context ? 
1 : 0), ctx->ctx_id); + "Acquire success on session_hdl 0x%x num_rsrces %d ctx %u", + cmd->session_handle, cmd->num_resources, ctx->ctx_id); + + return rc; + +free_hw: + release.ctxt_to_hw_map = ctx_isp->hw_ctx; + if (ctx_isp->hw_acquired) + ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, + &release); + ctx_isp->hw_ctx = NULL; + ctx_isp->hw_acquired = false; +free_res: kfree(isp_res); +end: + return rc; +} + +static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx, + void *args) +{ + int rc = 0; + struct cam_acquire_hw_cmd_v1 *cmd = + (struct cam_acquire_hw_cmd_v1 *)args; + struct cam_hw_acquire_args param; + struct cam_hw_release_args release; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; + struct cam_isp_acquire_hw_info *acquire_hw_info = NULL; + + if (!ctx->hw_mgr_intf) { + CAM_ERR(CAM_ISP, "HW interface is not ready"); + rc = -EFAULT; + goto end; + } + + CAM_DBG(CAM_ISP, + "session_hdl 0x%x, hdl type %d, res %lld", + cmd->session_handle, cmd->handle_type, cmd->resource_hdl); + + /* for now we only support user pointer */ + if (cmd->handle_type != 1) { + CAM_ERR(CAM_ISP, "Only user pointer is supported"); + rc = -EINVAL; + goto end; + } + + if (cmd->data_size < sizeof(*acquire_hw_info)) { + CAM_ERR(CAM_ISP, "data_size is not a valid value"); + goto end; + } + + acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL); + if (!acquire_hw_info) { + rc = -ENOMEM; + goto end; + } + + CAM_DBG(CAM_ISP, "start copy resources from user"); + + if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl, + cmd->data_size)) { + rc = -EFAULT; + goto free_res; + } + + param.context_data = ctx; + param.event_cb = ctx->irq_cb_intf; + param.num_acq = CAM_API_COMPAT_CONSTANT; + param.acquire_info_size = cmd->data_size; + param.acquire_info = (uint64_t) acquire_hw_info; + + /* call HW manager to reserve the resource */ + rc = 
ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv, + ¶m); + if (rc != 0) { + CAM_ERR(CAM_ISP, "Acquire device failed"); + goto free_res; + } + + /* Query the context has rdi only resource */ + hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; + rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, + &hw_cmd_args); + if (rc) { + CAM_ERR(CAM_ISP, "HW command failed"); + goto free_hw; + } + + if (isp_hw_cmd_args.u.is_rdi_only_context) { + /* + * this context has rdi only resource assign rdi only + * state machine + */ + CAM_DBG(CAM_ISP, "RDI only session Context"); + + ctx_isp->substate_machine_irq = + cam_isp_ctx_rdi_only_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_rdi_only_activated_state_machine; + } else { + CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources"); + ctx_isp->substate_machine_irq = + cam_isp_ctx_activated_state_machine_irq; + ctx_isp->substate_machine = + cam_isp_ctx_activated_state_machine; + } + + ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context; + ctx_isp->hw_ctx = param.ctxt_to_hw_map; + ctx_isp->hw_acquired = true; + ctx->ctxt_to_hw_map = param.ctxt_to_hw_map; + + trace_cam_context_state("ISP", ctx); + CAM_DBG(CAM_ISP, + "Acquire success on session_hdl 0x%xs RDI only %d ctx %u", + ctx->session_hdl, + (isp_hw_cmd_args.u.is_rdi_only_context ? 
1 : 0), ctx->ctx_id); + kfree(acquire_hw_info); return rc; free_hw: release.ctxt_to_hw_map = ctx_isp->hw_ctx; ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release); ctx_isp->hw_ctx = NULL; + ctx_isp->hw_acquired = false; free_res: - kfree(isp_res); + kfree(acquire_hw_info); end: return rc; } +static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx, + void *args) +{ + int rc = -EINVAL; + uint32_t api_version; + + if (!ctx || !args) { + CAM_ERR(CAM_ISP, "Invalid input pointer"); + return rc; + } + + api_version = *((uint32_t *)args); + if (api_version == 1) + rc = __cam_isp_ctx_acquire_hw_v1(ctx, args); + else + CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version); + + return rc; +} + static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx, struct cam_config_dev_cmd *cmd) { int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *) ctx->ctx_priv; + + if (!ctx_isp->hw_acquired) { + CAM_ERR(CAM_ISP, "HW is not acquired, reject packet"); + return -EINVAL; + } rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd); @@ -2319,7 +2563,7 @@ static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx, ctx_isp->subscribe_event = link->subscribe_event; /* change state only if we had the init config */ - if (!list_empty(&ctx->pending_req_list)) { + if (ctx_isp->init_received) { ctx->state = CAM_CTX_READY; trace_cam_context_state("ISP", ctx); } @@ -2540,8 +2784,11 @@ static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx, struct cam_start_stop_dev_cmd *cmd) { int rc = 0; + struct cam_isp_context *ctx_isp = + (struct cam_isp_context *)ctx->ctx_priv; __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd); + ctx_isp->init_received = false; ctx->state = CAM_CTX_ACQUIRED; trace_cam_context_state("ISP", ctx); return rc; @@ -2563,15 +2810,34 @@ static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx, return rc; } +static int __cam_isp_ctx_release_hw_in_activated(struct 
cam_context *ctx, + void *cmd) +{ + int rc = 0; + + rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL); + if (rc) + CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc); + + rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd); + if (rc) + CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc); + + return rc; +} + static int __cam_isp_ctx_link_pause(struct cam_context *ctx) { int rc = 0; - struct cam_isp_hw_cmd_args hw_cmd_args; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; struct cam_isp_context *ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; - hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args); @@ -2581,12 +2847,15 @@ static int __cam_isp_ctx_link_pause(struct cam_context *ctx) static int __cam_isp_ctx_link_resume(struct cam_context *ctx) { int rc = 0; - struct cam_isp_hw_cmd_args hw_cmd_args; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; struct cam_isp_context *ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; - hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args); @@ -2597,13 +2866,16 @@ static int __cam_isp_ctx_handle_sof_freeze_evt( struct cam_context *ctx) { int rc = 0; - struct cam_isp_hw_cmd_args hw_cmd_args; + struct cam_hw_cmd_args hw_cmd_args; + struct cam_isp_hw_cmd_args isp_hw_cmd_args; struct cam_isp_context *ctx_isp = (struct cam_isp_context *) ctx->ctx_priv; hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx; - 
hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG; - hw_cmd_args.u.sof_irq_enable = 1; + hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL; + isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG; + isp_hw_cmd_args.u.sof_irq_enable = 1; + hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args; rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv, &hw_cmd_args); @@ -2735,8 +3007,10 @@ static struct cam_ctx_ops /* Acquired */ { .ioctl_ops = { + .acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired, .release_dev = __cam_isp_ctx_release_dev_in_top_state, .config_dev = __cam_isp_ctx_config_dev_in_acquired, + .release_hw = __cam_isp_ctx_release_hw_in_top_state, }, .crm_ops = { .link = __cam_isp_ctx_link_in_acquired, @@ -2745,6 +3019,7 @@ static struct cam_ctx_ops .flush_req = __cam_isp_ctx_flush_req_in_top_state, }, .irq_ops = NULL, + .pagefault_ops = cam_isp_context_dump_active_request, }, /* Ready */ { @@ -2752,12 +3027,14 @@ static struct cam_ctx_ops .start_dev = __cam_isp_ctx_start_dev_in_ready, .release_dev = __cam_isp_ctx_release_dev_in_top_state, .config_dev = __cam_isp_ctx_config_dev_in_top_state, + .release_hw = __cam_isp_ctx_release_hw_in_top_state, }, .crm_ops = { .unlink = __cam_isp_ctx_unlink_in_ready, .flush_req = __cam_isp_ctx_flush_req_in_ready, }, .irq_ops = NULL, + .pagefault_ops = cam_isp_context_dump_active_request, }, /* Activated */ { @@ -2765,6 +3042,7 @@ static struct cam_ctx_ops .stop_dev = __cam_isp_ctx_stop_dev_in_activated, .release_dev = __cam_isp_ctx_release_dev_in_activated, .config_dev = __cam_isp_ctx_config_dev_in_top_state, + .release_hw = __cam_isp_ctx_release_hw_in_activated, }, .crm_ops = { .unlink = __cam_isp_ctx_unlink_in_activated, @@ -2773,10 +3051,55 @@ static struct cam_ctx_ops .process_evt = __cam_isp_ctx_process_evt, }, .irq_ops = __cam_isp_ctx_handle_irq_in_activated, + .pagefault_ops = cam_isp_context_dump_active_request, }, }; +static int cam_isp_context_dump_active_request(void *data, unsigned long iova, + uint32_t 
buf_info) +{ + + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_isp_ctx_req *req_isp = NULL; + struct cam_isp_prepare_hw_update_data *hw_update_data = NULL; + struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL; + bool mem_found = false; + int rc = 0; + + struct cam_isp_context *isp_ctx = + (struct cam_isp_context *)ctx->ctx_priv; + + if (!isp_ctx) { + CAM_ERR(CAM_ISP, "Invalid isp ctx"); + return -EINVAL; + } + + CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d", + ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + req_isp = (struct cam_isp_ctx_req *) req->req_priv; + hw_update_data = &req_isp->hw_update_data; + pf_dbg_entry = &(req->pf_data); + CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet, + iova, buf_info, &mem_found); + if (rc) + CAM_ERR(CAM_ISP, "Failed to dump pf info"); + + if (mem_found) + CAM_ERR(CAM_ISP, "Found page fault in req %lld %d", + req->request_id, rc); + } + + return rc; +} + int cam_isp_context_init(struct cam_isp_context *ctx, struct cam_context *ctx_base, struct cam_req_mgr_kmd_ops *crm_node_intf, diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h index 0bbd8780c9786ca91bc040620e63cc611c2211eb..5ebd82ead4704353a5b3e0860a4d0dbc0506412e 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h @@ -16,6 +16,7 @@ #include #include +#include #include "cam_context.h" #include "cam_isp_hw_mgr_intf.h" @@ -115,6 +116,7 @@ struct cam_isp_ctx_req { uint32_t num_acked; int32_t bubble_report; struct cam_isp_prepare_hw_update_data hw_update_data; + bool bubble_detected; }; /** @@ -158,6 +160,9 @@ struct cam_isp_context_state_monitor { * 
@cam_isp_ctx_state_monitor: State monitoring array * @rdi_only_context: Get context type information. * true, if context is rdi only context + * @hw_acquired: Indicate whether HW resources are acquired + * @init_received: Indicate whether init config packet is received + * @split_acquire: Indicate whether a separate acquire is expected * */ struct cam_isp_context { @@ -182,6 +187,9 @@ struct cam_isp_context { struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES]; bool rdi_only_context; + bool hw_acquired; + bool init_received; + bool split_acquire; }; /** diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c index 5eefcae859e008e230e33ee7083edb6052dc6ab9..fc960bfba0f264ab9084a654990ea61c028061f5 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c @@ -25,9 +25,29 @@ #include "cam_isp_hw_mgr_intf.h" #include "cam_node.h" #include "cam_debug_util.h" +#include "cam_smmu_api.h" static struct cam_isp_dev g_isp_dev; +static void cam_isp_dev_iommu_fault_handler( + struct iommu_domain *domain, struct device *dev, unsigned long iova, + int flags, void *token, uint32_t buf_info) +{ + int i = 0; + struct cam_node *node = NULL; + + if (!token) { + CAM_ERR(CAM_ISP, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)token; + + for (i = 0; i < node->ctx_size; i++) + cam_context_dump_pf_info(&(node->ctx_list[i]), iova, + buf_info); +} + static const struct of_device_id cam_isp_dt_match[] = { { .compatible = "qcom,cam-isp" @@ -35,23 +55,47 @@ static const struct of_device_id cam_isp_dt_match[] = { {} }; +static int cam_isp_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + mutex_lock(&g_isp_dev.isp_mutex); + g_isp_dev.open_cnt++; + mutex_unlock(&g_isp_dev.isp_mutex); + + return 0; +} + static int cam_isp_subdev_close(struct v4l2_subdev 
*sd, struct v4l2_subdev_fh *fh) { + int rc = 0; struct cam_node *node = v4l2_get_subdevdata(sd); + mutex_lock(&g_isp_dev.isp_mutex); + if (g_isp_dev.open_cnt <= 0) { + CAM_DBG(CAM_ISP, "ISP subdev is already closed"); + rc = -EINVAL; + goto end; + } + + g_isp_dev.open_cnt--; if (!node) { CAM_ERR(CAM_ISP, "Node ptr is NULL"); - return -EINVAL; + rc = -EINVAL; + goto end; } - cam_node_shutdown(node); + if (g_isp_dev.open_cnt == 0) + cam_node_shutdown(node); - return 0; +end: + mutex_unlock(&g_isp_dev.isp_mutex); + return rc; } static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = { .close = cam_isp_subdev_close, + .open = cam_isp_subdev_open, }; static int cam_isp_dev_remove(struct platform_device *pdev) @@ -81,6 +125,7 @@ static int cam_isp_dev_probe(struct platform_device *pdev) int i; struct cam_hw_mgr_intf hw_mgr_intf; struct cam_node *node; + int iommu_hdl = -1; g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops; /* Initialze the v4l2 subdevice first. (create cam_node) */ @@ -93,7 +138,7 @@ static int cam_isp_dev_probe(struct platform_device *pdev) node = (struct cam_node *) g_isp_dev.sd.token; memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf)); - rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf); + rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl); if (rc != 0) { CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!"); goto unregister; @@ -118,6 +163,9 @@ static int cam_isp_dev_probe(struct platform_device *pdev) goto unregister; } + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_isp_dev_iommu_fault_handler, node); + CAM_INFO(CAM_ISP, "Camera ISP probe complete"); return 0; diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h index 6f7d05da090105655f1a6997416b3922472ad845..a88ed5533907421d5562489f4ed102b4fb511ab3 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h +++ 
b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h @@ -24,12 +24,15 @@ * @sd: Commone camera subdevice node * @ctx: Isp base context storage * @ctx_isp: Isp private context storage - * + * @isp_mutex: ISP dev mutex + * @open_cnt: Open device count */ struct cam_isp_dev { struct cam_subdev sd; struct cam_context ctx[CAM_CTX_MAX]; struct cam_isp_context ctx_isp[CAM_CTX_MAX]; + struct mutex isp_mutex; + int32_t open_cnt; }; #endif /* __CAM_ISP_DEV_H__ */ diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c index c4c16f07b33750f1210c15af8086ef5936aebdb4..6e9627c7fc8ce3354959953ade4b53f8447c39b4 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c @@ -28,6 +28,7 @@ #include "cam_packet_util.h" #include "cam_debug_util.h" #include "cam_cpas_api.h" +#include "cam_mem_mgr_api.h" #define CAM_IFE_HW_ENTRIES_MAX 20 @@ -764,6 +765,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel( if (!ife_src_res->hw_res[j]) continue; + hw_intf = ife_src_res->hw_res[j]->hw_intf; + if (j == CAM_ISP_HW_SPLIT_LEFT) { vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT; @@ -771,7 +774,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel( /*TBD */ vfe_acquire.vfe_out.is_master = 1; vfe_acquire.vfe_out.dual_slave_core = - 1; + (hw_intf->hw_idx == 0) ? 1 : 0; } else { vfe_acquire.vfe_out.is_master = 0; vfe_acquire.vfe_out.dual_slave_core = @@ -781,10 +784,10 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel( vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_RIGHT; vfe_acquire.vfe_out.is_master = 0; - vfe_acquire.vfe_out.dual_slave_core = 0; + vfe_acquire.vfe_out.dual_slave_core = + (hw_intf->hw_idx == 0) ? 
1 : 0; } - hw_intf = ife_src_res->hw_res[j]->hw_intf; rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &vfe_acquire, sizeof(struct cam_vfe_acquire_args)); @@ -990,6 +993,7 @@ static int cam_ife_mgr_acquire_cid_res( struct cam_ife_hw_mgr_res *cid_res_temp, *cid_res_iterator; struct cam_csid_hw_reserve_resource_args csid_acquire; uint32_t acquired_cnt = 0; + struct cam_isp_out_port_info *out_port = NULL; ife_hw_mgr = ife_ctx->hw_mgr; *cid_res = NULL; @@ -1007,6 +1011,9 @@ static int cam_ife_mgr_acquire_cid_res( csid_acquire.res_id = csid_path; CAM_DBG(CAM_ISP, "path %d", csid_path); + if (in_port->num_out_res) + out_port = &(in_port->data[0]); + /* Try acquiring CID resource from previously acquired HW */ list_for_each_entry(cid_res_iterator, &ife_ctx->res_list_ife_cid, list) { @@ -1015,6 +1022,12 @@ static int cam_ife_mgr_acquire_cid_res( if (!cid_res_iterator->hw_res[i]) continue; + if (cid_res_iterator->is_secure == 1 || + (cid_res_iterator->is_secure == 0 && + in_port->num_out_res && + out_port->secure_mode == 1)) + continue; + hw_intf = cid_res_iterator->hw_res[i]->hw_intf; rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire, sizeof(csid_acquire)); @@ -1054,7 +1067,7 @@ static int cam_ife_mgr_acquire_cid_res( } /* Acquire Left if not already acquired */ - for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) { + for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) { if (!ife_hw_mgr->csid_devices[i]) continue; @@ -1070,7 +1083,7 @@ static int cam_ife_mgr_acquire_cid_res( } } - if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) { + if (i == -1 || !csid_acquire.node_res) { CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d", csid_path); goto put_res; @@ -1084,6 +1097,10 @@ static int cam_ife_mgr_acquire_cid_res( /* CID(DT_ID) value of acquire device, require for path */ cid_res_temp->res_id = csid_acquire.node_res->res_id; cid_res_temp->is_dual_vfe = in_port->usage_type; + + if (in_port->num_out_res) + cid_res_temp->is_secure = 
out_port->secure_mode; + cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res); /* @@ -1524,8 +1541,134 @@ void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata, } /* entry function: acquire_hw */ -static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, - void *acquire_hw_args) +static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args) +{ + struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv; + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -1; + int i, j; + struct cam_ife_hw_mgr_ctx *ife_ctx; + struct cam_isp_in_port_info *in_port = NULL; + struct cam_cdm_acquire_data cdm_acquire; + uint32_t num_pix_port_per_in = 0; + uint32_t num_rdi_port_per_in = 0; + uint32_t total_pix_port = 0; + uint32_t total_rdi_port = 0; + uint32_t in_port_length = 0; + uint32_t total_in_port_length = 0; + struct cam_isp_acquire_hw_info *acquire_hw_info = NULL; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. 
Seems like error"); + return -EINVAL; + } + + /* get the ife ctx */ + rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); + if (rc || !ife_ctx) { + CAM_ERR(CAM_ISP, "Get ife hw context failed"); + goto err; + } + + ife_ctx->common.cb_priv = acquire_args->context_data; + for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++) + ife_ctx->common.event_cb[i] = acquire_args->event_cb; + + ife_ctx->hw_mgr = ife_hw_mgr; + + + memcpy(cdm_acquire.identifier, "ife", sizeof("ife")); + cdm_acquire.cell_index = 0; + cdm_acquire.handle = 0; + cdm_acquire.userdata = ife_ctx; + cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX; + for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) { + if (ife_hw_mgr->cdm_reg_map[i]) + cdm_acquire.base_array[j++] = + ife_hw_mgr->cdm_reg_map[i]; + } + cdm_acquire.base_array_cnt = j; + + + cdm_acquire.id = CAM_CDM_VIRTUAL; + cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback; + rc = cam_cdm_acquire(&cdm_acquire); + if (rc) { + CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW"); + goto free_ctx; + } + + CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x", + cdm_acquire.handle); + ife_ctx->cdm_handle = cdm_acquire.handle; + ife_ctx->cdm_ops = cdm_acquire.ops; + + acquire_hw_info = + (struct cam_isp_acquire_hw_info *)acquire_args->acquire_info; + in_port = (struct cam_isp_in_port_info *) + ((uint8_t *)&acquire_hw_info->data + + acquire_hw_info->input_info_offset); + + /* acquire HW resources */ + for (i = 0; i < acquire_hw_info->num_inputs; i++) { + in_port_length = sizeof(struct cam_isp_in_port_info) + + (in_port->num_out_res - 1) * + sizeof(struct cam_isp_out_port_info); + total_in_port_length += in_port_length; + + if (total_in_port_length > acquire_hw_info->input_info_size) { + CAM_ERR(CAM_ISP, "buffer size is not enough"); + rc = -EINVAL; + goto free_res; + } + rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port, + &num_pix_port_per_in, &num_rdi_port_per_in); + total_pix_port += num_pix_port_per_in; + total_rdi_port += num_rdi_port_per_in; + 
+ if (rc) { + CAM_ERR(CAM_ISP, "can not acquire resource"); + goto free_res; + } + in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port + + in_port_length); + } + + /* Check whether context has only RDI resource */ + if (!total_pix_port) { + ife_ctx->is_rdi_only_context = 1; + CAM_DBG(CAM_ISP, "RDI only context"); + } + + /* Process base info */ + rc = cam_ife_mgr_process_base_info(ife_ctx); + if (rc) { + CAM_ERR(CAM_ISP, "Process base info failed"); + goto free_res; + } + + acquire_args->ctxt_to_hw_map = ife_ctx; + ife_ctx->ctx_in_use = 1; + + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx); + + CAM_DBG(CAM_ISP, "Exit...(success)"); + + return 0; +free_res: + cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx); + cam_cdm_release(ife_ctx->cdm_handle); +free_ctx: + cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx); +err: + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + +/* entry function: acquire_hw */ +static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args) { struct cam_ife_hw_mgr *ife_hw_mgr = hw_mgr_priv; struct cam_hw_acquire_args *acquire_args = acquire_hw_args; @@ -1671,6 +1814,30 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, return rc; } +/* entry function: acquire_hw */ +static int cam_ife_mgr_acquire(void *hw_mgr_priv, + void *acquire_hw_args) +{ + struct cam_hw_acquire_args *acquire_args = acquire_hw_args; + int rc = -1; + + CAM_DBG(CAM_ISP, "Enter..."); + + if (!acquire_args || acquire_args->num_acq <= 0) { + CAM_ERR(CAM_ISP, "Nothing to acquire. 
Seems like error"); + return -EINVAL; + } + + if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT) + rc = cam_ife_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args); + else + rc = cam_ife_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args); + + CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc); + return rc; +} + + static int cam_isp_blob_bw_update( struct cam_isp_bw_config *bw_config, struct cam_ife_hw_mgr_ctx *ctx) @@ -2243,7 +2410,8 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args) struct cam_isp_stop_args stop_isp; struct cam_ife_hw_mgr_ctx *ctx; struct cam_ife_hw_mgr_res *hw_mgr_res; - uint32_t i; + struct cam_isp_resource_node *rsrc_node = NULL; + uint32_t i, camif_debug; if (!hw_mgr_priv || !start_isp) { CAM_ERR(CAM_ISP, "Invalid arguments"); @@ -2277,6 +2445,24 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args) sizeof(g_ife_hw_mgr.debug_cfg.csid_debug)); } + camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug; + list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) { + for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) { + if (!hw_mgr_res->hw_res[i]) + continue; + + rsrc_node = hw_mgr_res->hw_res[i]; + if (rsrc_node->process_cmd && (rsrc_node->res_id == + CAM_ISP_HW_VFE_IN_CAMIF)) { + rc = hw_mgr_res->hw_res[i]->process_cmd( + hw_mgr_res->hw_res[i], + CAM_ISP_HW_CMD_SET_CAMIF_DEBUG, + &camif_debug, + sizeof(camif_debug)); + } + } + } + rc = cam_ife_hw_mgr_init_hw(ctx); if (rc) { CAM_ERR(CAM_ISP, "Init failed"); @@ -3053,45 +3239,137 @@ static int cam_ife_mgr_sof_irq_debug( return rc; } +static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info, + bool *mem_found) +{ + uint64_t iova_addr; + size_t src_buf_size; + int i; + int j; + int rc = 0; + int32_t mmu_hdl; + + struct cam_buf_io_cfg *io_cfg = NULL; + + if (mem_found) + *mem_found = false; + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload + + packet->io_configs_offset / 4); + + for (i = 0; i < 
packet->num_io_configs; i++) { + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) == + GET_FD_FROM_HANDLE(pf_buf_info)) { + CAM_INFO(CAM_ISP, + "Found PF at port: %d mem %x fd: %x", + io_cfg[i].resource_type, + io_cfg[i].mem_handle[j], + pf_buf_info); + if (mem_found) + *mem_found = true; + } + + CAM_INFO(CAM_ISP, "port: %d f: %u format: %d dir %d", + io_cfg[i].resource_type, + io_cfg[i].fence, + io_cfg[i].format, + io_cfg[i].direction); + + mmu_hdl = cam_mem_is_secure_buf( + io_cfg[i].mem_handle[j]) ? sec_mmu_hdl : + iommu_hdl; + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], + mmu_hdl, &iova_addr, &src_buf_size); + if (rc < 0) { + CAM_ERR(CAM_ISP, "get src buf address fail"); + continue; + } + if (iova_addr >> 32) { + CAM_ERR(CAM_ISP, "Invalid mapped address"); + rc = -EINVAL; + continue; + } + + CAM_INFO(CAM_ISP, + "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x", + j, io_cfg[i].planes[j].width, + io_cfg[i].planes[j].height, + (int32_t)src_buf_size, + (unsigned int)iova_addr, + io_cfg[i].offsets[j], + io_cfg[i].mem_handle[j]); + } + } +} + static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args) { int rc = 0; - struct cam_isp_hw_cmd_args *hw_cmd_args = cmd_args; - struct cam_ife_hw_mgr_ctx *ctx; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv; + struct cam_ife_hw_mgr_ctx *ctx = (struct cam_ife_hw_mgr_ctx *) + hw_cmd_args->ctxt_to_hw_map; + struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL; if (!hw_mgr_priv || !cmd_args) { CAM_ERR(CAM_ISP, "Invalid arguments"); return -EINVAL; } - ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map; if (!ctx || !ctx->ctx_in_use) { CAM_ERR(CAM_ISP, "Fatal: Invalid context is used"); return -EPERM; } switch (hw_cmd_args->cmd_type) { - case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT: - if (ctx->is_rdi_only_context) - hw_cmd_args->u.is_rdi_only_context = 1; - else - 
hw_cmd_args->u.is_rdi_only_context = 0; + case CAM_HW_MGR_CMD_INTERNAL: + if (!hw_cmd_args->u.internal_args) { + CAM_ERR(CAM_ISP, "Invalid cmd arguments"); + return -EINVAL; + } + isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *) + hw_cmd_args->u.internal_args; + + switch (isp_hw_cmd_args->cmd_type) { + case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT: + if (ctx->is_rdi_only_context) + isp_hw_cmd_args->u.is_rdi_only_context = 1; + else + isp_hw_cmd_args->u.is_rdi_only_context = 0; + break; + case CAM_ISP_HW_MGR_CMD_PAUSE_HW: + cam_ife_mgr_pause_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_RESUME_HW: + cam_ife_mgr_resume_hw(ctx); + break; + case CAM_ISP_HW_MGR_CMD_SOF_DEBUG: + cam_ife_mgr_sof_irq_debug(ctx, + isp_hw_cmd_args->u.sof_irq_enable); + break; + default: + CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x", + hw_cmd_args->cmd_type); + rc = -EINVAL; + break; + } break; - case CAM_ISP_HW_MGR_CMD_PAUSE_HW: - cam_ife_mgr_pause_hw(ctx); - break; - case CAM_ISP_HW_MGR_CMD_RESUME_HW: - cam_ife_mgr_resume_hw(ctx); - break; - case CAM_ISP_HW_MGR_CMD_SOF_DEBUG: - cam_ife_mgr_sof_irq_debug(ctx, hw_cmd_args->u.sof_irq_enable); + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_ife_mgr_print_io_bufs( + hw_cmd_args->u.pf_args.pf_data.packet, + hw_mgr->mgr_common.img_iommu_hdl, + hw_mgr->mgr_common.img_iommu_hdl_secure, + hw_cmd_args->u.pf_args.buf_info, + hw_cmd_args->u.pf_args.mem_found); break; default: - CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x", - hw_cmd_args->cmd_type); - rc = -EINVAL; - break; + CAM_ERR(CAM_ISP, "Invalid cmd"); } return rc; @@ -4427,6 +4705,28 @@ DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug, cam_ife_get_csid_debug, cam_ife_set_csid_debug, "%16llu"); +static int cam_ife_set_camif_debug(void *data, u64 val) +{ + g_ife_hw_mgr.debug_cfg.camif_debug = val; + CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", val); + return 0; +} + +static int cam_ife_get_camif_debug(void *data, u64 *val) +{ + *val = g_ife_hw_mgr.debug_cfg.camif_debug; + 
CAM_DBG(CAM_ISP, + "Set camif enable_diag_sensor_status value :%lld", + g_ife_hw_mgr.debug_cfg.csid_debug); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug, + cam_ife_get_camif_debug, + cam_ife_set_camif_debug, "%16llu"); + static int cam_ife_hw_mgr_debug_register(void) { g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife", @@ -4452,6 +4752,14 @@ static int cam_ife_hw_mgr_debug_register(void) CAM_ERR(CAM_ISP, "failed to create enable_recovery"); goto err; } + + if (!debugfs_create_file("ife_camif_debug", + 0644, + g_ife_hw_mgr.debug_cfg.dentry, NULL, + &cam_ife_camif_debug)) { + CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug"); + goto err; + } g_ife_hw_mgr.debug_cfg.enable_recovery = 0; return 0; @@ -4461,7 +4769,7 @@ static int cam_ife_hw_mgr_debug_register(void) return -ENOMEM; } -int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf) +int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl) { int rc = -EFAULT; int i, j; @@ -4623,7 +4931,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf) /* fill return structure */ hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr; hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps; - hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire_hw; + hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire; hw_mgr_intf->hw_start = cam_ife_mgr_start_hw; hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw; hw_mgr_intf->hw_read = cam_ife_mgr_read; @@ -4633,6 +4941,9 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf) hw_mgr_intf->hw_config = cam_ife_mgr_config_hw; hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd; + if (iommu_hdl) + *iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl; + cam_ife_hw_mgr_debug_register(); CAM_DBG(CAM_ISP, "Exit"); diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h index 0198f3d62e9cfea2860b935a109ac98a165ee859..cf1e425558f3ccba74656e3316bf8bfa566cc9ee 100644 --- 
a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h @@ -50,6 +50,7 @@ enum cam_ife_hw_mgr_res_type { * @parent: point to the parent resource node. * @children: point to the children resource nodes * @child_num: numbe of the child resource node. + * @is_secure informs whether the resource is in secure mode or not * */ struct cam_ife_hw_mgr_res { @@ -63,6 +64,7 @@ struct cam_ife_hw_mgr_res { struct cam_ife_hw_mgr_res *parent; struct cam_ife_hw_mgr_res *child[CAM_IFE_HW_OUT_RES_MAX]; uint32_t num_children; + uint32_t is_secure; }; @@ -81,15 +83,17 @@ struct ctx_base_info { /** * struct cam_ife_hw_mgr_debug - contain the debug information * - * @dentry: Debugfs entry - * @csid_debug: csid debug information - * @enable_recovery enable recovery + * @dentry: Debugfs entry + * @csid_debug: csid debug information + * @enable_recovery: enable recovery + * @enable_diag_sensor_status: enable sensor diagnosis status * */ struct cam_ife_hw_mgr_debug { struct dentry *dentry; uint64_t csid_debug; uint32_t enable_recovery; + uint32_t camif_debug; }; /** @@ -203,9 +207,10 @@ struct cam_ife_hw_mgr { * etnry functinon for the IFE HW manager. 
* * @hw_mgr_intf: IFE hardware manager object returned + * @iommu_hdl: Iommu handle to be returned * */ -int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf); +int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl); /** * cam_ife_mgr_do_tasklet_buf_done() diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c index d74899d0e614f04d51b837d1a861d664e777dd22..8b9c555cae17604db68f890c183f108c96a2d45d 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c @@ -16,7 +16,7 @@ int cam_isp_hw_mgr_init(struct device_node *of_node, - struct cam_hw_mgr_intf *hw_mgr) + struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl) { int rc = 0; const char *compat_str = NULL; @@ -25,7 +25,7 @@ int cam_isp_hw_mgr_init(struct device_node *of_node, (const char **)&compat_str); if (strnstr(compat_str, "ife", strlen(compat_str))) - rc = cam_ife_hw_mgr_init(hw_mgr); + rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl); else { CAM_ERR(CAM_ISP, "Invalid ISP hw type"); rc = -EINVAL; diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c index abc6bb0a6db897cd75a31fa8acdb2addfef9ab0f..807f9f602c228fdf9cd83d2d79b9cb530e324c50 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c @@ -457,6 +457,7 @@ int cam_isp_add_io_buffers( num_out_buf = 0; num_in_buf = 0; io_cfg_used_bytes = 0; + prepare->pf_data->packet = prepare->packet; /* Max one hw entries required for each base */ if (prepare->num_hw_update_entries + 1 >= diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h 
b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h index fd71c37c8fa1cc34f13820bbd4ec69c7bc474e03..1586216f2073bed7b23bb459f2b700f4ee7a796b 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h @@ -203,13 +203,11 @@ enum cam_isp_hw_mgr_command { /** * struct cam_isp_hw_cmd_args - Payload for hw manager command * - * @ctxt_to_hw_map: HW context from the acquire * @cmd_type HW command type * @get_context Get context type information */ struct cam_isp_hw_cmd_args { - void *ctxt_to_hw_map; - uint32_t cmd_type; + uint32_t cmd_type; union { uint32_t is_rdi_only_context; uint32_t sof_irq_enable; @@ -225,9 +223,9 @@ struct cam_isp_hw_cmd_args { * @of_node: Device node input * @hw_mgr: Input/output structure for the ISP hardware manager * initialization - * + * @iommu_hdl: Iommu handle to be returned */ int cam_isp_hw_mgr_init(struct device_node *of_node, - struct cam_hw_mgr_intf *hw_mgr); + struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl); #endif /* __CAM_ISP_HW_MGR_INTF_H__ */ diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h index d1452f1eb0320c516ded69509de76a4158754c66..85fec0f15dcdcbe98ce29eaf035f714f66a6cd5c 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h @@ -52,6 +52,7 @@ static struct cam_ife_csid_pxl_reg_offset cam_ife_csid_170_ipp_reg_offset = { .csid_pxl_timestamp_perv1_eof_addr = 0x2ac, /* configurations */ .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, }; static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = { @@ -288,6 +289,8 @@ static struct 
cam_ife_csid_common_reg_offset .ipp_irq_mask_all = 0x7FFF, .rdi_irq_mask_all = 0x7FFF, .ppp_irq_mask_all = 0x0, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .format_measure_en_val = 1, }; static struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = { diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h index f30c33872cfad9bdc7f6003b582168e3de6afaa7..4b0040fa62ab51c566427510f1c56b3558b8359d 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h @@ -52,6 +52,7 @@ static struct cam_ife_csid_pxl_reg_offset cam_ife_csid_175_ipp_reg_offset = { .csid_pxl_timestamp_perv1_eof_addr = 0x2ac, /* configurations */ .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, }; static struct cam_ife_csid_pxl_reg_offset cam_ife_csid_175_ppp_reg_offset = { @@ -91,6 +92,7 @@ static struct cam_ife_csid_pxl_reg_offset cam_ife_csid_175_ppp_reg_offset = { .csid_pxl_timestamp_perv1_eof_addr = 0x7ac, /* configurations */ .pix_store_en_shift_val = 7, + .early_eof_en_shift_val = 29, }; @@ -328,6 +330,8 @@ static struct cam_ife_csid_common_reg_offset .ipp_irq_mask_all = 0x7FFF, .rdi_irq_mask_all = 0x7FFF, .ppp_irq_mask_all = 0xFFFF, + .measure_en_hbi_vbi_cnt_mask = 0xC, + .format_measure_en_val = 1, }; static struct cam_ife_csid_reg_offset cam_ife_csid_175_reg_offset = { diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c index c5e1c52b907fb7cb76e9c94e286bde2bdc1d3e9f..e820299852f1e581b0c1271827860e83c7152f9e 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c +++ 
b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c @@ -1505,8 +1505,12 @@ static int cam_ife_csid_init_config_pxl_path( cam_io_w_mb(val, soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_cfg0_addr); + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg1_addr); + /* select the post irq sub sample strobe for time stamp capture */ - cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base + + val |= CSID_TIMESTAMP_STB_POST_IRQ; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_cfg1_addr); if (path_data->crop_enable) { @@ -1525,6 +1529,15 @@ static int cam_ife_csid_init_config_pxl_path( pxl_reg->csid_pxl_vcrop_addr); CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x", csid_hw->hw_intf->hw_idx, val); + + /* Enable generating early eof strobe based on crop config */ + if (!(csid_hw->csid_debug & CSID_DEBUG_DISABLE_EARLY_EOF)) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + val |= (1 << pxl_reg->early_eof_en_shift_val); + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + } } /* set frame drop pattern to 0 and period to 1 */ @@ -1553,9 +1566,23 @@ static int cam_ife_csid_init_config_pxl_path( val = cam_io_r_mb(soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_cfg0_addr); val |= (1 << csid_reg->cmn_reg->path_en_shift_val); + + if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) + val |= csid_reg->cmn_reg->format_measure_en_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + pxl_reg->csid_pxl_cfg0_addr); + /* Enable the HBI/VBI counter */ + if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); + val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); + } + /* configure the rx packet capture 
based on csid debug set */ val = 0; if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) @@ -1594,6 +1621,7 @@ static int cam_ife_csid_deinit_pxl_path( struct cam_isp_resource_node *res) { int rc = 0; + uint32_t val; const struct cam_ife_csid_reg_offset *csid_reg; struct cam_hw_soc_info *soc_info; const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL; @@ -1624,8 +1652,25 @@ static int cam_ife_csid_deinit_pxl_path( csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP", res->res_id); rc = -EINVAL; + goto end; + } + + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + if (val & csid_reg->cmn_reg->format_measure_en_val) { + val &= ~csid_reg->cmn_reg->format_measure_en_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_cfg0_addr); + + /* Disable the HBI/VBI counter */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); + val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + pxl_reg->csid_pxl_format_measure_cfg0_addr); } +end: res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; return rc; } @@ -1794,6 +1839,7 @@ static int cam_ife_csid_init_config_rdi_path( const struct cam_ife_csid_reg_offset *csid_reg; struct cam_hw_soc_info *soc_info; uint32_t path_format = 0, plain_fmt = 0, val = 0, id; + uint32_t format_measure_addr; path_data = (struct cam_ife_csid_path_cfg *) res->res_priv; csid_reg = csid_hw->csid_info->csid_reg; @@ -1887,9 +1933,24 @@ static int cam_ife_csid_init_config_rdi_path( csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); val |= (1 << csid_reg->cmn_reg->path_en_shift_val); + if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) + val |= csid_reg->cmn_reg->format_measure_en_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + format_measure_addr = + csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr; + + /* Enable the HBI/VBI counter */ + 
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + format_measure_addr); + val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, + soc_info->reg_map[0].mem_base + format_measure_addr); + } + /* configure the rx packet capture based on csid debug set */ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) val = ((1 << @@ -1925,7 +1986,7 @@ static int cam_ife_csid_deinit_rdi_path( struct cam_isp_resource_node *res) { int rc = 0; - uint32_t id; + uint32_t id, val, format_measure_addr; const struct cam_ife_csid_reg_offset *csid_reg; struct cam_hw_soc_info *soc_info; @@ -1942,6 +2003,24 @@ static int cam_ife_csid_deinit_rdi_path( return -EINVAL; } + format_measure_addr = + csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr; + + if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + val &= ~csid_reg->cmn_reg->format_measure_en_val; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr); + + /* Disable the HBI/VBI counter */ + val = cam_io_r_mb(soc_info->reg_map[0].mem_base + + format_measure_addr); + val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask; + cam_io_w_mb(val, soc_info->reg_map[0].mem_base + + format_measure_addr); + } + res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; return rc; } @@ -2044,6 +2123,60 @@ static int cam_ife_csid_disable_rdi_path( return rc; } +static int cam_ife_csid_get_hbi_vbi( + struct cam_ife_csid_hw *csid_hw, + struct cam_isp_resource_node *res) +{ + uint32_t hbi, vbi; + const struct cam_ife_csid_reg_offset *csid_reg; + const struct cam_ife_csid_rdi_reg_offset *rdi_reg; + struct cam_hw_soc_info *soc_info; + + csid_reg = csid_hw->csid_info->csid_reg; + soc_info = &csid_hw->hw_info->soc_info; + + if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH || + res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) { + 
CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d", + csid_hw->hw_intf->hw_idx, res->res_type, + res->res_id); + return -EINVAL; + } + + if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) { + CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d", + csid_hw->hw_intf->hw_idx, + csid_hw->hw_info->hw_state); + return -EINVAL; + } + + if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) { + hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_format_measure1_addr); + vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ipp_reg->csid_pxl_format_measure2_addr); + } else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) { + hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ppp_reg->csid_pxl_format_measure1_addr); + vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + csid_reg->ppp_reg->csid_pxl_format_measure2_addr); + } else { + rdi_reg = csid_reg->rdi_reg[res->res_id]; + hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure1_addr); + vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base + + rdi_reg->csid_rdi_format_measure2_addr); + } + + CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u HBI: 0x%x", res->res_id, + hbi); + CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u VBI: 0x%x", res->res_id, + vbi); + + return 0; +} + + static int cam_ife_csid_get_time_stamp( struct cam_ife_csid_hw *csid_hw, void *cmd_args) { @@ -2708,6 +2841,7 @@ static int cam_ife_csid_process_cmd(void *hw_priv, int rc = 0; struct cam_ife_csid_hw *csid_hw; struct cam_hw_info *csid_hw_info; + struct cam_isp_resource_node *res = NULL; if (!hw_priv || !cmd_args) { CAM_ERR(CAM_ISP, "CSID: Invalid arguments"); @@ -2720,6 +2854,11 @@ static int cam_ife_csid_process_cmd(void *hw_priv, switch (cmd_type) { case CAM_IFE_CSID_CMD_GET_TIME_STAMP: rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args); + if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) { + res = ((struct cam_csid_get_time_stamp_args *) + cmd_args)->node_res; + 
cam_ife_csid_get_hbi_vbi(csid_hw, res); + } break; case CAM_IFE_CSID_SET_CSID_DEBUG: rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args); diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h index 730528de2796ae558fbbf7a48f79c12febd96fcf..d987f21965aeebb57896e4be145ccd7fbdc5ed50 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h @@ -76,6 +76,8 @@ #define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE BIT(4) #define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE BIT(5) #define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6) +#define CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7) +#define CSID_DEBUG_DISABLE_EARLY_EOF BIT(8) /* enum cam_csid_path_halt_mode select the path halt mode control */ enum cam_csid_path_halt_mode { @@ -135,6 +137,7 @@ struct cam_ife_csid_pxl_reg_offset { /* configuration */ uint32_t pix_store_en_shift_val; + uint32_t early_eof_en_shift_val; }; struct cam_ife_csid_rdi_reg_offset { @@ -287,6 +290,8 @@ struct cam_ife_csid_common_reg_offset { uint32_t ipp_irq_mask_all; uint32_t rdi_irq_mask_all; uint32_t ppp_irq_mask_all; + uint32_t measure_en_hbi_vbi_cnt_mask; + uint32_t format_measure_en_val; }; /** diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h index 775511434e757a6c92a76ba4af22b24cd50acbbd..6f5087d37825a61c4b111c0752af7ef3f39b1c37 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h @@ -97,6 +97,7 @@ enum cam_isp_hw_cmd_type { CAM_ISP_HW_CMD_GET_REG_DUMP, CAM_ISP_HW_CMD_UBWC_UPDATE, CAM_ISP_HW_CMD_SOF_IRQ_DEBUG, + 
CAM_ISP_HW_CMD_SET_CAMIF_DEBUG, CAM_ISP_HW_CMD_MAX, }; diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h index c810c1bf998cba8e3beacbfa62b3701803df6d5e..88d5b132ad3dab1b9ce27049a1c4bf7f80a02c11 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h @@ -50,6 +50,8 @@ static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = { .raw_crop_width_cfg = 0x00000CE4, .raw_crop_height_cfg = 0x00000CE8, .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, }; static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = { @@ -79,6 +81,7 @@ static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = { .eof_irq_mask = 0x00000002, .error_irq_mask0 = 0x0003FC00, .error_irq_mask1 = 0x0FFF7E80, + .enable_diagnostic_hw = 0x1, }; struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = { diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h index f3c017c20f657cfa4dbb743532d89454ec3a182c..a409b32eb033318b4fe7b5385b952fa47fd73b0f 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h @@ -51,6 +51,8 @@ static struct cam_vfe_camif_ver2_reg vfe175_camif_reg = { .raw_crop_width_cfg = 0x00000CE4, .raw_crop_height_cfg = 0x00000CE8, .reg_update_cmd = 0x000004AC, + .vfe_diag_config = 0x00000C48, + .vfe_diag_sensor_status = 0x00000C4C, }; static struct cam_vfe_camif_reg_data vfe_175_camif_reg_data = { @@ -80,6 +82,7 @@ static struct cam_vfe_camif_reg_data vfe_175_camif_reg_data = { .eof_irq_mask = 
0x00000002, .error_irq_mask0 = 0x0003FC00, .error_irq_mask1 = 0xEFFF7E80, + .enable_diagnostic_hw = 0x1, }; static struct cam_vfe_camif_lite_ver2_reg vfe175_camif_lite_reg = { diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c index 1da2d2e9588ea30a05498275aa102f49cad67497..fc257ecaa604d9f1240ea6282564ea6a02a91c95 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c @@ -43,6 +43,7 @@ struct cam_vfe_mux_camif_data { uint32_t last_line; bool enable_sof_irq_debug; uint32_t irq_debug_cnt; + uint32_t camif_debug; }; static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern) @@ -309,6 +310,15 @@ static int cam_vfe_camif_resource_start( rsrc_data->enable_sof_irq_debug = false; rsrc_data->irq_debug_cnt = 0; + if (rsrc_data->camif_debug & + CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + val = cam_io_r_mb(rsrc_data->mem_base + + rsrc_data->camif_reg->vfe_diag_config); + val |= rsrc_data->reg_data->enable_diagnostic_hw; + cam_io_w_mb(val, rsrc_data->mem_base + + rsrc_data->camif_reg->vfe_diag_config); + } + CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx); return 0; } @@ -400,6 +410,14 @@ static int cam_vfe_camif_resource_stop( if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED; + val = cam_io_r_mb(camif_priv->mem_base + + camif_priv->camif_reg->vfe_diag_config); + if (val & camif_priv->reg_data->enable_diagnostic_hw) { + val &= ~camif_priv->reg_data->enable_diagnostic_hw; + cam_io_w_mb(val, camif_priv->mem_base + + camif_priv->camif_reg->vfe_diag_config); + } + return rc; } @@ -424,6 +442,7 @@ static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node, uint32_t 
cmd_type, void *cmd_args, uint32_t arg_size) { int rc = -EINVAL; + struct cam_vfe_mux_camif_data *camif_priv = NULL; if (!rsrc_node || !cmd_args) { CAM_ERR(CAM_ISP, "Invalid input arguments"); @@ -441,6 +460,11 @@ static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node, case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG: rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args); break; + case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG: + camif_priv = + (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv; + camif_priv->camif_debug = *((uint32_t *)cmd_args); + break; default: CAM_ERR(CAM_ISP, "unsupported process command:%d", cmd_type); @@ -465,6 +489,7 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv, struct cam_vfe_top_irq_evt_payload *payload; uint32_t irq_status0; uint32_t irq_status1; + uint32_t val; if (!handler_priv || !evt_payload_priv) { CAM_ERR(CAM_ISP, "Invalid params"); @@ -527,6 +552,14 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv, } else { ret = CAM_ISP_HW_ERROR_NONE; } + + if (camif_priv->camif_debug & + CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) { + val = cam_io_r(camif_priv->mem_base + + camif_priv->camif_reg->vfe_diag_sensor_status); + CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x", + camif_priv->mem_base, val); + } break; default: break; diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h index 2253cda226345e21457dd0a4b00be42fad027da2..7a6958930caad2fc1818b0c6fd998ba44221cca3 100644 --- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h +++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h @@ -16,6 +16,11 @@ #include "cam_isp_hw.h" #include "cam_vfe_top.h" +/* + * Debug values for camif module + */ +#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS BIT(0) + struct 
cam_vfe_camif_ver2_reg { uint32_t camif_cmd; uint32_t camif_config; @@ -27,6 +32,8 @@ struct cam_vfe_camif_ver2_reg { uint32_t raw_crop_width_cfg; uint32_t raw_crop_height_cfg; uint32_t reg_update_cmd; + uint32_t vfe_diag_config; + uint32_t vfe_diag_sensor_status; }; struct cam_vfe_camif_reg_data { @@ -63,6 +70,8 @@ struct cam_vfe_camif_reg_data { uint32_t eof_irq_mask; uint32_t error_irq_mask0; uint32_t error_irq_mask1; + + uint32_t enable_diagnostic_hw; }; struct cam_vfe_camif_ver2_hw_info { diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c index 02334a4e819588bb11ad15c5db5ffeb98fb5036f..287d4a4162fc43dfa33dc3945e2c54495731a8d7 100644 --- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c +++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c @@ -20,9 +20,49 @@ #include "cam_jpeg_context.h" #include "cam_context_utils.h" #include "cam_debug_util.h" +#include "cam_packet_util.h" static const char jpeg_dev_name[] = "jpeg"; +static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova, + uint32_t buf_info) +{ + + struct cam_context *ctx = (struct cam_context *)data; + struct cam_ctx_request *req = NULL; + struct cam_ctx_request *req_temp = NULL; + struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL; + int rc = 0; + int closest_port; + bool b_mem_found = false; + + + if (!ctx) { + CAM_ERR(CAM_JPEG, "Invalid ctx"); + return -EINVAL; + } + + CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d", + ctx->ctx_id, ctx->state); + + list_for_each_entry_safe(req, req_temp, + &ctx->active_req_list, list) { + pf_dbg_entry = &(req->pf_data); + closest_port = -1; + CAM_INFO(CAM_JPEG, "req_id : %lld ", req->request_id); + + rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet, + iova, buf_info, &b_mem_found); + if (rc) + CAM_ERR(CAM_JPEG, "Failed to dump pf info"); + + if (b_mem_found) + CAM_ERR(CAM_JPEG, "Found page fault in req %lld 
%d", + req->request_id, rc); + } + return rc; +} + static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx, struct cam_acquire_dev_cmd *cmd) { @@ -116,6 +156,7 @@ static struct cam_ctx_ops }, .crm_ops = { }, .irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired, + .pagefault_ops = cam_jpeg_context_dump_active_request, }, }; diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c index 46cc08f7ea5fea95509ff0a8e1971aea38397dbe..14892224e41233a55b0f3acf96d7907bdcadbdfc 100644 --- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c +++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c @@ -22,11 +22,31 @@ #include "cam_jpeg_hw_mgr_intf.h" #include "cam_jpeg_dev.h" #include "cam_debug_util.h" +#include "cam_smmu_api.h" #define CAM_JPEG_DEV_NAME "cam-jpeg" static struct cam_jpeg_dev g_jpeg_dev; +static void cam_jpeg_dev_iommu_fault_handler( + struct iommu_domain *domain, struct device *dev, unsigned long iova, + int flags, void *token, uint32_t buf_info) +{ + int i = 0; + struct cam_node *node = NULL; + + if (!token) { + CAM_ERR(CAM_JPEG, "invalid token in page handler cb"); + return; + } + + node = (struct cam_node *)token; + + for (i = 0; i < node->ctx_size; i++) + cam_context_dump_pf_info(&(node->ctx_list[i]), iova, + buf_info); +} + static const struct of_device_id cam_jpeg_dt_match[] = { { .compatible = "qcom,cam-jpeg" @@ -34,23 +54,50 @@ static const struct of_device_id cam_jpeg_dt_match[] = { { } }; +static int cam_jpeg_subdev_open(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + + mutex_lock(&g_jpeg_dev.jpeg_mutex); + g_jpeg_dev.open_cnt++; + mutex_unlock(&g_jpeg_dev.jpeg_mutex); + + return 0; +} + static int cam_jpeg_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { + int rc = 0; struct cam_node *node = v4l2_get_subdevdata(sd); + + mutex_lock(&g_jpeg_dev.jpeg_mutex); + if (g_jpeg_dev.open_cnt <= 0) { + CAM_DBG(CAM_JPEG, "JPEG 
subdev is already closed"); + rc = -EINVAL; + goto end; + } + + g_jpeg_dev.open_cnt--; + if (!node) { CAM_ERR(CAM_JPEG, "Node ptr is NULL"); - return -EINVAL; + rc = -EINVAL; + goto end; } - cam_node_shutdown(node); + if (g_jpeg_dev.open_cnt == 0) + cam_node_shutdown(node); - return 0; +end: + mutex_unlock(&g_jpeg_dev.jpeg_mutex); + return rc; } static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = { .close = cam_jpeg_subdev_close, + .open = cam_jpeg_subdev_open, }; static int cam_jpeg_dev_remove(struct platform_device *pdev) @@ -78,6 +125,7 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev) int i; struct cam_hw_mgr_intf hw_mgr_intf; struct cam_node *node; + int iommu_hdl = -1; g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops; rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME, @@ -89,7 +137,7 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev) node = (struct cam_node *)g_jpeg_dev.sd.token; rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node, - (uint64_t *)&hw_mgr_intf); + (uint64_t *)&hw_mgr_intf, &iommu_hdl); if (rc) { CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc); goto unregister; @@ -114,6 +162,9 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev) goto ctx_init_fail; } + cam_smmu_set_client_page_fault_handler(iommu_hdl, + cam_jpeg_dev_iommu_fault_handler, node); + mutex_init(&g_jpeg_dev.jpeg_mutex); CAM_INFO(CAM_JPEG, "Camera JPEG probe complete"); diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h index 4054287234a50af6d40bb1abfd788ca5598aa50e..0d15ced16e801485c53be61533a86869d6b0b8e5 100644 --- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h +++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h @@ -26,6 +26,7 @@ * @ctx: JPEG base context storage * @ctx_jpeg: JPEG private context storage * @jpeg_mutex: Jpeg dev mutex + * @open_cnt: Open device count */ struct cam_jpeg_dev { 
struct cam_subdev sd; @@ -33,5 +34,6 @@ struct cam_jpeg_dev { struct cam_context ctx[CAM_CTX_MAX]; struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX]; struct mutex jpeg_mutex; + int32_t open_cnt; }; #endif /* __CAM_JPEG_DEV_H__ */ diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c index 74e0dacb4bf36f357a53855019f9f7d4503fc259..4c47b38d97052a51a479a186986f9366feac3397 100644 --- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c +++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c @@ -600,6 +600,74 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args) return rc; } +static void cam_jpeg_mgr_print_io_bufs(struct cam_packet *packet, + int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info, + bool *mem_found) +{ + uint64_t iova_addr; + size_t src_buf_size; + int i; + int j; + int rc = 0; + int32_t mmu_hdl; + struct cam_buf_io_cfg *io_cfg = NULL; + + if (mem_found) + *mem_found = false; + + io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload + + packet->io_configs_offset / 4); + + for (i = 0; i < packet->num_io_configs; i++) { + for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) { + if (!io_cfg[i].mem_handle[j]) + break; + + if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) == + GET_FD_FROM_HANDLE(pf_buf_info)) { + CAM_INFO(CAM_JPEG, + "Found PF at port: %d mem %x fd: %x", + io_cfg[i].resource_type, + io_cfg[i].mem_handle[j], + pf_buf_info); + if (mem_found) + *mem_found = true; + } + + CAM_INFO(CAM_JPEG, "port: %d f: %u format: %d dir %d", + io_cfg[i].resource_type, + io_cfg[i].fence, + io_cfg[i].format, + io_cfg[i].direction); + + mmu_hdl = cam_mem_is_secure_buf( + io_cfg[i].mem_handle[j]) ? 
sec_mmu_hdl : + iommu_hdl; + rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], + mmu_hdl, &iova_addr, &src_buf_size); + if (rc < 0) { + CAM_ERR(CAM_UTIL, "get src buf address fail"); + continue; + } + if (iova_addr >> 32) { + CAM_ERR(CAM_JPEG, "Invalid mapped address"); + rc = -EINVAL; + continue; + } + + CAM_INFO(CAM_JPEG, + "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x", + j, io_cfg[i].planes[j].width, + io_cfg[i].planes[j].height, + (int32_t)src_buf_size, + (unsigned int)iova_addr, + io_cfg[i].offsets[j], + io_cfg[i].mem_handle[j]); + + iova_addr += io_cfg[i].offsets[j]; + } + } +} static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv, void *prepare_hw_update_args) @@ -675,6 +743,7 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv, CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu", (void *)packet, (void *)io_cfg_ptr, sizeof(struct cam_buf_io_cfg)); + prepare_args->pf_data->packet = packet; prepare_args->num_out_map_entries = 0; @@ -1410,7 +1479,35 @@ static int cam_jpeg_init_devices(struct device_node *of_node, return rc; } -int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl) +static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args) +{ + int rc = 0; + struct cam_hw_cmd_args *hw_cmd_args = cmd_args; + struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv; + + if (!hw_mgr_priv || !cmd_args) { + CAM_ERR(CAM_JPEG, "Invalid arguments"); + return -EINVAL; + } + + switch (hw_cmd_args->cmd_type) { + case CAM_HW_MGR_CMD_DUMP_PF_INFO: + cam_jpeg_mgr_print_io_bufs( + hw_cmd_args->u.pf_args.pf_data.packet, + hw_mgr->iommu_hdl, + hw_mgr->iommu_sec_hdl, + hw_cmd_args->u.pf_args.buf_info, + hw_cmd_args->u.pf_args.mem_found); + break; + default: + CAM_ERR(CAM_JPEG, "Invalid cmd"); + } + + return rc; +} + +int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl, + int *iommu_hdl) { int i, rc; uint32_t num_dev; @@ -1434,6 +1531,7 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t 
*hw_mgr_hdl) hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw; hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush; hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop; + hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd; mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex); spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock); @@ -1495,6 +1593,9 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl) goto cdm_iommu_failed; } + if (iommu_hdl) + *iommu_hdl = g_jpeg_hw_mgr.iommu_hdl; + return rc; cdm_iommu_failed: diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h index f8f4fca70492d71b9b87aa3b84f74bd3a07162dc..5705890cd1092cd7fc672bf2009a1bd4f620f20c 100644 --- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h +++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h @@ -17,8 +17,7 @@ #include #include - int cam_jpeg_hw_mgr_init(struct device_node *of_node, - uint64_t *hw_mgr_hdl); + uint64_t *hw_mgr_hdl, int *iommu_hdl); #endif /* CAM_JPEG_HW_MGR_INTF_H */ diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c index a4ee1040e4c86388a73cef7050f2c1660778eed4..6b1250aea71453d162831bd0702cf232fbfa64f2 100644 --- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c +++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c @@ -81,6 +81,7 @@ static int cam_lrme_dev_open(struct v4l2_subdev *sd, static int cam_lrme_dev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { + int rc = 0; struct cam_lrme_dev *lrme_dev = g_lrme_dev; struct cam_node *node = v4l2_get_subdevdata(sd); @@ -90,18 +91,25 @@ static int cam_lrme_dev_close(struct v4l2_subdev *sd, } mutex_lock(&lrme_dev->lock); - lrme_dev->open_cnt--; - mutex_unlock(&lrme_dev->lock); + if (lrme_dev->open_cnt <= 0) { + CAM_DBG(CAM_LRME, "LRME subdev is already 
closed"); + rc = -EINVAL; + goto end; + } + lrme_dev->open_cnt--; if (!node) { CAM_ERR(CAM_LRME, "Node is NULL"); - return -EINVAL; + rc = -EINVAL; + goto end; } if (lrme_dev->open_cnt == 0) cam_node_shutdown(node); - return 0; +end: + mutex_unlock(&lrme_dev->lock); + return rc; } static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = { diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c index 6e0093364390d756238df2076b72201b775a61d8..f47dea8d42f7d907d979b6407c65daf6004d580c 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c @@ -287,7 +287,7 @@ EXPORT_SYMBOL(cam_mem_get_cpu_buf); int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd) { int rc = 0, idx; - uint32_t ion_cache_ops; + uint32_t cache_dir; unsigned long dmabuf_flag = 0; if (!cmd) @@ -301,39 +301,57 @@ int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd) if (!tbl.bufq[idx].active) { rc = -EINVAL; - goto fail; + goto end; } if (cmd->buf_handle != tbl.bufq[idx].buf_handle) { rc = -EINVAL; - goto fail; + goto end; } rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag); if (rc) { CAM_ERR(CAM_MEM, "cache get flags failed %d", rc); - goto fail; + goto end; } if (dmabuf_flag & ION_FLAG_CACHED) { switch (cmd->mem_cache_ops) { case CAM_MEM_CLEAN_CACHE: - ion_cache_ops = 1; + cache_dir = DMA_FROM_DEVICE; break; case CAM_MEM_INV_CACHE: - ion_cache_ops = 2; + cache_dir = DMA_TO_DEVICE; break; case CAM_MEM_CLEAN_INV_CACHE: - ion_cache_ops = 3; + cache_dir = DMA_BIDIRECTIONAL; break; default: CAM_ERR(CAM_MEM, "invalid cache ops :%d", cmd->mem_cache_ops); rc = -EINVAL; - goto fail; + goto end; } + } else { + CAM_DBG(CAM_MEM, "BUF is not cached"); + goto end; } -fail: + + rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf, + cache_dir); + if (rc) { + CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc); + goto 
end; + } + + rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf, + cache_dir); + if (rc) { + CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc); + goto end; + } + +end: mutex_unlock(&tbl.bufq[idx].q_lock); return rc; } @@ -368,7 +386,7 @@ static int cam_mem_util_get_dma_buf_fd(size_t len, int rc = 0; if (!buf || !fd) { - CAM_ERR(CAM_MEM, "Invalid params"); + CAM_ERR(CAM_MEM, "Invalid params, buf=%pK, fd=%pK", buf, fd); return -EINVAL; } @@ -378,7 +396,7 @@ static int cam_mem_util_get_dma_buf_fd(size_t len, *fd = dma_buf_fd(*buf, O_CLOEXEC); if (*fd < 0) { - CAM_ERR(CAM_MEM, "get fd fail"); + CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd); rc = -EINVAL; goto get_fd_fail; } @@ -429,13 +447,8 @@ static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd, } -static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd) +static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd) { - if (!cmd->flags) { - CAM_ERR(CAM_MEM, "Invalid flags"); - return -EINVAL; - } - if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) { CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)", CAM_MEM_MMU_MAX_HANDLE); @@ -459,14 +472,16 @@ static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd) } if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) { - CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)", - CAM_MEM_MMU_MAX_HANDLE); + CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)", + cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE); return -EINVAL; } if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE && cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) { - CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed"); + CAM_ERR(CAM_MEM, + "Kernel mapping in secure mode not allowed, flags=0x%x", + cmd->flags); return -EINVAL; } @@ -492,12 +507,13 @@ static int cam_mem_util_map_hw_va(uint32_t flags, int dir = cam_mem_util_get_dma_dir(flags); if (dir < 0) { - CAM_ERR(CAM_MEM, "fail to map DMA direction"); + CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir); return dir; } - 
CAM_DBG(CAM_MEM, "map_hw_va : flags = %x, dir=%d, num_hdls=%d", - flags, dir, num_hdls); + CAM_DBG(CAM_MEM, + "map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d", + fd, flags, dir, num_hdls); if (flags & CAM_MEM_FLAG_PROTECTED_MODE) { for (i = 0; i < num_hdls; i++) { @@ -509,7 +525,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags, if (rc < 0) { CAM_ERR(CAM_MEM, - "Failed to securely map to smmu"); + "Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d", + i, fd, dir, mmu_hdls[i], rc); goto multi_map_fail; } } @@ -523,7 +540,9 @@ static int cam_mem_util_map_hw_va(uint32_t flags, region); if (rc < 0) { - CAM_ERR(CAM_MEM, "Failed to map to smmu"); + CAM_ERR(CAM_MEM, + "Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d", + i, fd, dir, mmu_hdls[i], region, rc); goto multi_map_fail; } } @@ -558,9 +577,10 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd) } len = cmd->len; - rc = cam_mem_util_check_flags(cmd); + rc = cam_mem_util_check_alloc_flags(cmd); if (rc) { - CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags); + CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d", + cmd->flags, rc); return rc; } @@ -568,12 +588,15 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd) &dmabuf, &fd); if (rc) { - CAM_ERR(CAM_MEM, "Ion allocation failed"); + CAM_ERR(CAM_MEM, + "Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d", + cmd->len, cmd->align, cmd->flags, cmd->num_hdl); return rc; } idx = cam_mem_get_slot(); if (idx < 0) { + CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx); rc = -ENOMEM; goto slot_fail; } @@ -600,7 +623,9 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd) region); if (rc) { - CAM_ERR(CAM_MEM, "Failed in map_hw_va, rc=%d", rc); + CAM_ERR(CAM_MEM, + "Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d", + cmd->flags, fd, region, cmd->num_hdl, rc); goto map_hw_fail; } } @@ -631,8 +656,9 @@ int 
cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd) cmd->out.fd = tbl.bufq[idx].fd; cmd->out.vaddr = 0; - CAM_DBG(CAM_MEM, "buf handle: %x, fd: %d, len: %zu", - cmd->out.buf_handle, cmd->out.fd, + CAM_DBG(CAM_MEM, + "fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu", + cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle, tbl.bufq[idx].len); return rc; @@ -657,8 +683,11 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd) return -EINVAL; } - if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) + if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) { + CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)", + cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE); return -EINVAL; + } rc = cam_mem_util_check_map_flags(cmd); if (rc) { @@ -681,8 +710,13 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd) &hw_vaddr, &len, CAM_SMMU_REGION_IO); - if (rc) + if (rc) { + CAM_ERR(CAM_MEM, + "Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d", + cmd->flags, cmd->fd, CAM_SMMU_REGION_IO, + cmd->num_hdl, rc); goto map_fail; + } } idx = cam_mem_get_slot(); @@ -716,6 +750,11 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd) cmd->out.buf_handle = tbl.bufq[idx].buf_handle; cmd->out.vaddr = 0; + CAM_DBG(CAM_MEM, + "fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu", + cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle, + tbl.bufq[idx].len); + return rc; map_fail: @@ -745,14 +784,18 @@ static int cam_mem_util_unmap_hw_va(int32_t idx, fd = tbl.bufq[idx].fd; CAM_DBG(CAM_MEM, - "unmap_hw_va : fd=%x, flags=0x%x, num_hdls=%d, client=%d", - fd, flags, num_hdls, client); + "unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d", + idx, fd, flags, num_hdls, client); if (flags & CAM_MEM_FLAG_PROTECTED_MODE) { for (i = 0; i < num_hdls; i++) { rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd); - if (rc < 0) + if (rc < 0) { + CAM_ERR(CAM_MEM, + "Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d", + i, fd, mmu_hdls[i], rc); goto 
unmap_end; + } } } else { for (i = 0; i < num_hdls; i++) { @@ -768,8 +811,12 @@ static int cam_mem_util_unmap_hw_va(int32_t idx, client); rc = -EINVAL; } - if (rc < 0) + if (rc < 0) { + CAM_ERR(CAM_MEM, + "Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d", + i, fd, mmu_hdls[i], region, rc); goto unmap_end; + } } } @@ -941,7 +988,8 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd) idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle); if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) { - CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle"); + CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle", + idx); return -EINVAL; } @@ -952,11 +1000,12 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd) if (tbl.bufq[idx].buf_handle != cmd->buf_handle) { CAM_ERR(CAM_MEM, - "Released buf handle not matching within table"); + "Released buf handle %d not matching within table %d, idx=%d", + cmd->buf_handle, tbl.bufq[idx].buf_handle, idx); return -EINVAL; } - CAM_DBG(CAM_MEM, "Releasing hdl = %x", cmd->buf_handle); + CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx); rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER); return rc; diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c index d11e1c74e5e30c6c40ec5f8d42b1985dda7bcd80..a43865e64c3d47ab65e14f94a7d623d28ac8f600 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c @@ -60,52 +60,6 @@ static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq) return rc; } -/** - * __cam_req_mgr_reset_req_tbl() - * - * @brief : Initialize req table data - * @in_q : request queue pointer - * - * @return: 0 for success, negative for failure - * - */ -static int __cam_req_mgr_print_req_tbl(struct cam_req_mgr_req_data *req) -{ - int rc = 0; - int32_t i = 0; - struct cam_req_mgr_req_queue 
*in_q = req->in_q; - struct cam_req_mgr_req_tbl *req_tbl = req->l_tbl; - - if (!in_q || !req_tbl) { - CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl); - return -EINVAL; - } - CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots); - mutex_lock(&req->lock); - for (i = 0; i < in_q->num_slots; i++) { - CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, red_id %lld", i, - in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i)); - } - - while (req_tbl != NULL) { - for (i = 0; i < req_tbl->num_slots; i++) { - CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d", - req_tbl->slot[i].idx, - req_tbl->slot[i].req_ready_map, - req_tbl->slot[i].state); - } - CAM_DBG(CAM_CRM, - "TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d", - req_tbl->id, req_tbl->pd, req_tbl->dev_count, - req_tbl->dev_mask, req_tbl->skip_traverse, - req_tbl->num_slots); - req_tbl = req_tbl->next; - } - mutex_unlock(&req->lock); - - return rc; -} - /** * __cam_req_mgr_find_pd_tbl() * @@ -1256,12 +1210,12 @@ static void __cam_req_mgr_destroy_subdev( /** * __cam_req_mgr_destroy_link_info() * - * @brief : Cleans up the mem allocated while linking - * @link : pointer to link, mem associated with this link is freed + * @brief : Unlinks all devices on the link + * @link : pointer to link * * @return : returns if unlink for any device was success or failure */ -static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) +static int __cam_req_mgr_disconnect_link(struct cam_req_mgr_core_link *link) { int32_t i = 0; struct cam_req_mgr_connected_device *dev; @@ -1276,21 +1230,34 @@ static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) /* Using device ops unlink devices */ for (i = 0; i < link->num_devs; i++) { dev = &link->l_dev[i]; - if (dev != NULL) { - link_data.dev_hdl = dev->dev_hdl; - if (dev->ops && dev->ops->link_setup) { - rc = dev->ops->link_setup(&link_data); - if (rc) - CAM_ERR(CAM_CRM, - "Unlink failed dev name %s hdl %x", - dev->dev_info.name, - dev->dev_hdl); - } - 
dev->dev_hdl = 0; - dev->parent = NULL; - dev->ops = NULL; + if (dev == NULL) + continue; + + link_data.dev_hdl = dev->dev_hdl; + if (dev->ops && dev->ops->link_setup) { + rc = dev->ops->link_setup(&link_data); + if (rc) + CAM_ERR(CAM_CRM, + "Unlink failed dev name %s hdl %x", + dev->dev_info.name, + dev->dev_hdl); } + dev->dev_hdl = 0; + dev->parent = NULL; + dev->ops = NULL; } + + return rc; +} + +/** + * __cam_req_mgr_destroy_link_info() + * + * @brief : Cleans up the mem allocated while linking + * @link : pointer to link, mem associated with this link is freed + */ +static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) +{ __cam_req_mgr_destroy_all_tbl(&link->req.l_tbl); __cam_req_mgr_reset_in_q(&link->req); link->req.num_tbl = 0; @@ -1299,8 +1266,6 @@ static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link) link->pd_mask = 0; link->num_devs = 0; link->max_delay = 0; - - return rc; } /** @@ -2320,27 +2285,24 @@ static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link) { int rc; - mutex_lock(&link->lock); spin_lock_bh(&link->link_state_spin_lock); link->state = CAM_CRM_LINK_STATE_IDLE; + spin_unlock_bh(&link->link_state_spin_lock); + rc = __cam_req_mgr_disconnect_link(link); + if (rc) + CAM_ERR(CAM_CORE, + "Unlink for all devices was not successful"); + + mutex_lock(&link->lock); /* Destroy timer of link */ crm_timer_exit(&link->watchdog); - spin_unlock_bh(&link->link_state_spin_lock); - __cam_req_mgr_print_req_tbl(&link->req); - - /* Destroy workq payload data */ - kfree(link->workq->task.pool[0].payload); - link->workq->task.pool[0].payload = NULL; /* Destroy workq of link */ cam_req_mgr_workq_destroy(&link->workq); /* Cleanup request tables and unlink devices */ - rc = __cam_req_mgr_destroy_link_info(link); - if (rc) - CAM_ERR(CAM_CORE, - "Unlink for all devices was not successful"); + __cam_req_mgr_destroy_link_info(link); /* Free memory holding data of linked devs */ 
__cam_req_mgr_destroy_subdev(link->l_dev); @@ -2378,7 +2340,6 @@ int cam_req_mgr_destroy_session( goto end; } - mutex_lock(&cam_session->lock); if (cam_session->num_links) { CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d", ses_info->session_hdl, @@ -2396,7 +2357,6 @@ int cam_req_mgr_destroy_session( } } list_del(&cam_session->entry); - mutex_unlock(&cam_session->lock); mutex_destroy(&cam_session->lock); kfree(cam_session); diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c index 0d21064afed768a47805310c9297bbc597883085..1e6de6a4672c91c99b4dc5103922e26742d629ae 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c @@ -153,6 +153,10 @@ static unsigned int cam_req_mgr_poll(struct file *f, static int cam_req_mgr_close(struct file *filep) { + struct v4l2_subdev *sd; + struct v4l2_fh *vfh = filep->private_data; + struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh); + mutex_lock(&g_dev.cam_lock); if (g_dev.open_cnt <= 0) { @@ -161,6 +165,17 @@ static int cam_req_mgr_close(struct file *filep) } cam_req_mgr_handle_core_shutdown(); + + list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) { + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) + continue; + if (sd->internal_ops && sd->internal_ops->close) { + CAM_DBG(CAM_CRM, "Invoke subdev close for device %s", + sd->name); + sd->internal_ops->close(sd, subdev_fh); + } + } + g_dev.open_cnt--; v4l2_fh_release(filep); @@ -221,14 +236,15 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&ses_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_session_info))) { return -EFAULT; } rc = cam_req_mgr_create_session(&ses_info); if (!rc) if (copy_to_user((void *)k_ioctl->handle, - &ses_info, k_ioctl->size)) + &ses_info, + sizeof(struct cam_req_mgr_session_info))) rc = -EFAULT; } 
break; @@ -241,7 +257,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&ses_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_session_info))) { return -EFAULT; } @@ -257,14 +273,15 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&link_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_link_info))) { return -EFAULT; } rc = cam_req_mgr_link(&link_info); if (!rc) if (copy_to_user((void *)k_ioctl->handle, - &link_info, k_ioctl->size)) + &link_info, + sizeof(struct cam_req_mgr_link_info))) rc = -EFAULT; } break; @@ -277,7 +294,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&unlink_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_unlink_info))) { return -EFAULT; } @@ -293,7 +310,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&sched_req, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_sched_request))) { return -EFAULT; } @@ -309,7 +326,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&flush_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_flush_info))) { return -EFAULT; } @@ -325,7 +342,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&sync_info, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_sync_mode))) { return -EFAULT; } @@ -340,7 +357,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&cmd, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_mem_mgr_alloc_cmd))) { rc = -EFAULT; break; } @@ -348,7 +365,7 @@ static long cam_private_ioctl(struct file *file, void *fh, rc = cam_mem_mgr_alloc_and_map(&cmd); if (!rc) if (copy_to_user((void *)k_ioctl->handle, - &cmd, k_ioctl->size)) { + &cmd, sizeof(struct cam_mem_mgr_alloc_cmd))) { rc = -EFAULT; break; } @@ 
-362,7 +379,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&cmd, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_mem_mgr_map_cmd))) { rc = -EFAULT; break; } @@ -370,7 +387,7 @@ static long cam_private_ioctl(struct file *file, void *fh, rc = cam_mem_mgr_map(&cmd); if (!rc) if (copy_to_user((void *)k_ioctl->handle, - &cmd, k_ioctl->size)) { + &cmd, sizeof(struct cam_mem_mgr_map_cmd))) { rc = -EFAULT; break; } @@ -384,7 +401,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&cmd, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_mem_mgr_release_cmd))) { rc = -EFAULT; break; } @@ -400,7 +417,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&cmd, (void *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_mem_cache_ops_cmd))) { rc = -EFAULT; break; } @@ -418,7 +435,7 @@ static long cam_private_ioctl(struct file *file, void *fh, if (copy_from_user(&cmd, (void __user *)k_ioctl->handle, - k_ioctl->size)) { + sizeof(struct cam_req_mgr_link_control))) { rc = -EFAULT; break; } diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c index 55b78f5167710085d30426e40aab9bb1863a5864..aeaa1e966139cf4add2af64ee5b01a1dec7862ef 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c @@ -262,8 +262,13 @@ void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq) (*crm_workq)->job = NULL; WORKQ_RELEASE_LOCK(*crm_workq, flags); destroy_workqueue(job); - } else + } else { WORKQ_RELEASE_LOCK(*crm_workq, flags); + } + + /* Destroy workq payload data */ + kfree((*crm_workq)->task.pool[0].payload); + (*crm_workq)->task.pool[0].payload = NULL; kfree((*crm_workq)->task.pool); kfree(*crm_workq); *crm_workq = NULL; diff --git 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c index ed0a26b70effc1c7c37f2402b146531fff45e32e..31d9d35062bdcd76964cd622b4c4c2ec2d0275bd 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c @@ -141,7 +141,7 @@ static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl) CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info); return -EINVAL; } - rc = msm_camera_power_down(power_info, soc_info); + rc = cam_sensor_util_power_down(power_info, soc_info); if (rc) { CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc); return rc; @@ -301,7 +301,7 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply) trace_cam_apply_req("Actuator", apply->request_id); CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id); - + mutex_lock(&(a_ctrl->actuator_mutex)); if ((apply->request_id == a_ctrl->i2c_data.per_frame[request_id].request_id) && (a_ctrl->i2c_data.per_frame[request_id].is_settings_valid) @@ -312,7 +312,7 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply) CAM_ERR(CAM_ACTUATOR, "Failed in applying the request: %lld\n", apply->request_id); - return rc; + goto release_mutex; } } del_req_id = (request_id + @@ -327,12 +327,14 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply) CAM_ERR(CAM_ACTUATOR, "Fail deleting the req: %d err: %d\n", del_req_id, rc); - return rc; + goto release_mutex; } } else { CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up"); } +release_mutex: + mutex_unlock(&(a_ctrl->actuator_mutex)); return rc; } @@ -352,6 +354,8 @@ int32_t cam_actuator_establish_link( CAM_ERR(CAM_ACTUATOR, "Device data is NULL"); return -EINVAL; } + + mutex_lock(&(a_ctrl->actuator_mutex)); if (link->link_enable) { 
a_ctrl->bridge_intf.link_hdl = link->link_hdl; a_ctrl->bridge_intf.crm_cb = link->crm_cb; @@ -359,6 +363,7 @@ int32_t cam_actuator_establish_link( a_ctrl->bridge_intf.link_hdl = -1; a_ctrl->bridge_intf.crm_cb = NULL; } + mutex_unlock(&(a_ctrl->actuator_mutex)); return 0; } @@ -891,7 +896,9 @@ int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush_req) continue; if (i2c_set->is_settings_valid == 1) { + mutex_lock(&(a_ctrl->actuator_mutex)); rc = delete_request(i2c_set); + mutex_unlock(&(a_ctrl->actuator_mutex)); if (rc < 0) CAM_ERR(CAM_ACTUATOR, "delete request: %lld rc: %d", diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c index aaf6ba45c56ef05e832494bbb906bd6d67ca9214..2ffda23f6702e0d35efadda0185549c198e0fa17 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c @@ -20,20 +20,29 @@ #define SCM_SVC_CAMERASS 0x18 #define SECURE_SYSCALL_ID 0x6 +#define SECURE_SYSCALL_ID_2 0x7 + +#define LANE_MASK_2PH 0x1F +#define LANE_MASK_3PH 0x7 static int csiphy_dump; module_param(csiphy_dump, int, 0644); -static int cam_csiphy_notify_secure_mode(int phy, bool protect) +static int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev, + bool protect, int32_t offset) { struct scm_desc desc = {0}; + if (offset >= CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset"); + return -EINVAL; + } + desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL); desc.args[0] = protect; - desc.args[1] = phy; + desc.args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset]; - CAM_DBG(CAM_CSIPHY, "phy : %d, protect : %d", phy, protect); - if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID), + if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID_2), &desc)) { CAM_ERR(CAM_CSIPHY, "scm call to 
hypervisor failed"); return -EINVAL; @@ -42,6 +51,27 @@ static int cam_csiphy_notify_secure_mode(int phy, bool protect) return 0; } +int32_t cam_csiphy_get_instance_offset( + struct csiphy_device *csiphy_dev, + int32_t dev_handle) +{ + int32_t i; + + if (csiphy_dev->acquire_count > + CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid acquire count"); + return -EINVAL; + } + + for (i = 0; i < csiphy_dev->acquire_count; i++) { + if (dev_handle == + csiphy_dev->bridge_intf.device_hdl[i]) + break; + } + + return i; +} + void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev, struct cam_csiphy_query_cap *csiphy_cap) { @@ -75,6 +105,54 @@ void cam_csiphy_reset(struct csiphy_device *csiphy_dev) } } +int32_t cam_csiphy_update_secure_info( + struct csiphy_device *csiphy_dev, + struct cam_csiphy_info *cam_cmd_csiphy_info, + struct cam_config_dev_cmd *cfg_dev) +{ + uint32_t clock_lane, adj_lane_mask, temp; + int32_t offset; + + if (csiphy_dev->acquire_count >= + CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid acquire count"); + return -EINVAL; + } + + offset = cam_csiphy_get_instance_offset(csiphy_dev, + cfg_dev->dev_handle); + if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid offset"); + return -EINVAL; + } + + if (cam_cmd_csiphy_info->combo_mode) + clock_lane = + csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_combo_ck_ln; + else + clock_lane = + csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_clock_lane; + + adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_2PH & + ~clock_lane; + temp = adj_lane_mask & (clock_lane - 1); + adj_lane_mask = + ((adj_lane_mask & (~(clock_lane - 1))) >> 1) | temp; + + if (cam_cmd_csiphy_info->csiphy_3phase) + adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_3PH; + + csiphy_dev->csiphy_info.secure_mode[offset] = 1; + + csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = + adj_lane_mask << (csiphy_dev->soc_info.index * + (CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) + + 
(!cam_cmd_csiphy_info->csiphy_3phase) * + (CAM_CSIPHY_MAX_CPHY_LANES)); + + return 0; +} + int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev, struct cam_config_dev_cmd *cfg_dev) { @@ -136,7 +214,10 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev, csiphy_dev->csiphy_info.settle_time = cam_cmd_csiphy_info->settle_time; csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate; - csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode; + + if (cam_cmd_csiphy_info->secure_mode == 1) + cam_csiphy_update_secure_info(csiphy_dev, + cam_cmd_csiphy_info, cfg_dev); return rc; } @@ -234,7 +315,7 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev) cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_config_array_size; - lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x1f; + lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_2PH; for (i = 0; i < MAX_DPHY_DATA_LN; i++) { if (mask == 0x2) { if (lane_mask & mask) @@ -263,7 +344,7 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev) cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_3ph_config_array_size; - lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x7; + lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_3PH; mask = lane_mask; while (mask != 0) { temp = (i << 1)+1; @@ -369,6 +450,7 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev) void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev) { struct cam_hw_soc_info *soc_info; + int32_t i = 0; if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT) return; @@ -376,13 +458,17 @@ void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev) if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) { soc_info = &csiphy_dev->soc_info; - if (csiphy_dev->csiphy_info.secure_mode) - cam_csiphy_notify_secure_mode( - csiphy_dev->soc_info.index, - CAM_SECURE_MODE_NON_SECURE); + for (i = 0; i < csiphy_dev->acquire_count; i++) { + if (csiphy_dev->csiphy_info.secure_mode[i]) + 
cam_csiphy_notify_secure_mode( + csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, i); - csiphy_dev->csiphy_info.secure_mode = - CAM_SECURE_MODE_NON_SECURE; + csiphy_dev->csiphy_info.secure_mode[i] = + CAM_SECURE_MODE_NON_SECURE; + + csiphy_dev->csiphy_cpas_cp_reg_mask[i] = 0; + } cam_csiphy_reset(csiphy_dev); cam_soc_util_disable_platform_resource(soc_info, true, true); @@ -573,6 +659,16 @@ int32_t cam_csiphy_core_cfg(void *phy_dev, } break; case CAM_STOP_DEV: { + int32_t offset, rc = 0; + struct cam_start_stop_dev_cmd config; + + rc = copy_from_user(&config, (void __user *)cmd->handle, + sizeof(config)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + goto release_mutex; + } + if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) || !csiphy_dev->start_dev_count) { CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d", @@ -580,20 +676,38 @@ int32_t cam_csiphy_core_cfg(void *phy_dev, goto release_mutex; } + offset = cam_csiphy_get_instance_offset(csiphy_dev, + config.dev_handle); + if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid offset"); + goto release_mutex; + } + if (--csiphy_dev->start_dev_count) { CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d", csiphy_dev->start_dev_count); + if (csiphy_dev->csiphy_info.secure_mode[offset]) + cam_csiphy_notify_secure_mode( + csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, offset); + + csiphy_dev->csiphy_info.secure_mode[offset] = + CAM_SECURE_MODE_NON_SECURE; + csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0; + goto release_mutex; } - if (csiphy_dev->csiphy_info.secure_mode) + if (csiphy_dev->csiphy_info.secure_mode[offset]) cam_csiphy_notify_secure_mode( - csiphy_dev->soc_info.index, - CAM_SECURE_MODE_NON_SECURE); + csiphy_dev, + CAM_SECURE_MODE_NON_SECURE, offset); - csiphy_dev->csiphy_info.secure_mode = + csiphy_dev->csiphy_info.secure_mode[offset] = CAM_SECURE_MODE_NON_SECURE; + csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0; + rc = cam_csiphy_disable_hw(csiphy_dev); if (rc < 0) 
CAM_ERR(CAM_CSIPHY, "Failed in csiphy release"); @@ -667,12 +781,28 @@ int32_t cam_csiphy_core_cfg(void *phy_dev, case CAM_START_DEV: { struct cam_ahb_vote ahb_vote; struct cam_axi_vote axi_vote; + struct cam_start_stop_dev_cmd config; + int32_t offset; + + rc = copy_from_user(&config, (void __user *)cmd->handle, + sizeof(config)); + if (rc < 0) { + CAM_ERR(CAM_CSIPHY, "Failed copying from User"); + goto release_mutex; + } if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) { csiphy_dev->start_dev_count++; goto release_mutex; } + offset = cam_csiphy_get_instance_offset(csiphy_dev, + config.dev_handle); + if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) { + CAM_ERR(CAM_CSIPHY, "Invalid offset"); + goto release_mutex; + } + ahb_vote.type = CAM_VOTE_ABSOLUTE; ahb_vote.vote.level = CAM_SVS_VOTE; axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW; @@ -685,12 +815,12 @@ int32_t cam_csiphy_core_cfg(void *phy_dev, goto release_mutex; } - if (csiphy_dev->csiphy_info.secure_mode) { + if (csiphy_dev->csiphy_info.secure_mode[offset] == 1) { rc = cam_csiphy_notify_secure_mode( - csiphy_dev->soc_info.index, - CAM_SECURE_MODE_SECURE); + csiphy_dev, + CAM_SECURE_MODE_SECURE, offset); if (rc < 0) - csiphy_dev->csiphy_info.secure_mode = + csiphy_dev->csiphy_info.secure_mode[offset] = CAM_SECURE_MODE_NON_SECURE; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h index 68f20b4acd597822c4268a7ede323bf8e5302db3..248903251d62195c90057aec2b354df41c622c84 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h @@ -60,6 +60,11 @@ #define CSIPHY_2PH_REGS 5 #define CSIPHY_3PH_REGS 6 +#define CSIPHY_MAX_INSTANCES 2 + +#define CAM_CSIPHY_MAX_DPHY_LANES 4 +#define CAM_CSIPHY_MAX_CPHY_LANES 3 + #define ENABLE_IRQ false #undef CDBG @@ -89,6 +94,15 @@ enum 
cam_csiphy_state { * @csiphy_version: CSIPhy Version * @csiphy_common_array_size: CSIPhy common array size * @csiphy_reset_array_size: CSIPhy reset array size + * @csiphy_2ph_config_array_size: 2ph settings size + * @csiphy_3ph_config_array_size: 3ph settings size + * @csiphy_cpas_cp_bits_per_phy: CP bits per phy + * @csiphy_cpas_cp_is_interleaved: checks whether cp bits + * are interleaved or not + * @csiphy_cpas_cp_2ph_offset: cp register 2ph offset + * @csiphy_cpas_cp_3ph_offset: cp register 3ph offset + * @csiphy_2ph_clock_lane: clock lane in 2ph + * @csiphy_2ph_combo_ck_ln: clk lane in combo 2ph */ struct csiphy_reg_parms_t { /*MIPI CSI PHY registers*/ @@ -103,6 +117,12 @@ struct csiphy_reg_parms_t { uint32_t csiphy_reset_array_size; uint32_t csiphy_2ph_config_array_size; uint32_t csiphy_3ph_config_array_size; + uint32_t csiphy_cpas_cp_bits_per_phy; + uint32_t csiphy_cpas_cp_is_interleaved; + uint32_t csiphy_cpas_cp_2ph_offset; + uint32_t csiphy_cpas_cp_3ph_offset; + uint32_t csiphy_2ph_clock_lane; + uint32_t csiphy_2ph_combo_ck_ln; }; /** @@ -113,9 +133,9 @@ struct csiphy_reg_parms_t { * @crm_cb: Callback API pointers */ struct intf_params { - int32_t device_hdl[2]; - int32_t session_hdl[2]; - int32_t link_hdl[2]; + int32_t device_hdl[CSIPHY_MAX_INSTANCES]; + int32_t session_hdl[CSIPHY_MAX_INSTANCES]; + int32_t link_hdl[CSIPHY_MAX_INSTANCES]; struct cam_req_mgr_kmd_ops ops; struct cam_req_mgr_crm_cb *crm_cb; }; @@ -177,7 +197,7 @@ struct cam_csiphy_param { uint8_t csiphy_3phase; uint8_t combo_mode; uint8_t lane_cnt; - uint8_t secure_mode; + uint8_t secure_mode[CSIPHY_MAX_INSTANCES]; uint64_t settle_time; uint64_t settle_time_combo_sensor; uint64_t data_rate; @@ -208,6 +228,10 @@ struct cam_csiphy_param { * @is_acquired_dev_combo_mode: * Flag that mentions whether already acquired * device is for combo mode + * @soc_info: SOC information + * @cpas_handle: CPAS handle + * @config_count: Config reg count + * @csiphy_cpas_cp_reg_mask: CP reg mask for phy instance 
*/ struct csiphy_device { struct mutex mutex; @@ -233,6 +257,7 @@ struct csiphy_device { struct cam_hw_soc_info soc_info; uint32_t cpas_handle; uint32_t config_count; + uint64_t csiphy_cpas_cp_reg_mask[CSIPHY_MAX_INSTANCES]; }; #endif /* _CAM_CSIPHY_DEV_H_ */ diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h index 0ebaa4619deba91c0c683d26f0f019ac82d2c9c7..b1ceb0109155d540373d4834c970d238d5781c55 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h @@ -23,6 +23,8 @@ struct csiphy_reg_parms_t csiphy_v1_1 = { .csiphy_reset_array_size = 5, .csiphy_2ph_config_array_size = 14, .csiphy_3ph_config_array_size = 43, + .csiphy_2ph_clock_lane = 0x1, + .csiphy_2ph_combo_ck_ln = 0x10, }; struct csiphy_reg_t csiphy_common_reg_1_1[] = { diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c index ca91bc7905a92e94b5f6a18c4b32c21db8e5d1cf..a57ecef07dc40383aed79fe3ceef7f6c016886b8 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c @@ -221,7 +221,7 @@ static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl) CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info); return -EINVAL; } - rc = msm_camera_power_down(power_info, soc_info); + rc = cam_sensor_util_power_down(power_info, soc_info); if (rc) { CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc); return rc; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile 
index c7889a5fc2f56cb782dff9d172529c18b7035671..4d1cbdc3c5a28a495d624ddef76235174750bc8a 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile @@ -1,10 +1,11 @@ -ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils -ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync +ccflags-y += -Idrivers/media/platform/msm/camera/cam_core +ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include +ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr +ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr -ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io -ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci -ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr -ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/ +ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils +ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu +ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c index 9af1f51204854ee685c963c189a59421dada217e..a7c74dc07484c711466c44ebf445dd3747be4b2b 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c @@ -16,7 +16,7 @@ #include "cam_flash_core.h" #include "cam_res_mgr_api.h" -int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl, +static int cam_flash_prepare(struct 
cam_flash_ctrl *flash_ctrl, bool regulator_enable) { int rc = 0; @@ -89,7 +89,7 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl, return rc; } -static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl) +static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl) { int j = 0; struct cam_flash_frame_setting *nrt_settings; @@ -120,20 +120,187 @@ static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl) return 0; } -int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush) +static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl) +{ + int rc = 0; + + if (fctrl->i2c_data.init_settings.is_settings_valid == true) { + rc = delete_request(&fctrl->i2c_data.init_settings); + if (rc) { + CAM_WARN(CAM_FLASH, + "Failed to delete Init i2c_setting: %d", + rc); + return rc; + } + } + if (fctrl->i2c_data.config_settings.is_settings_valid == true) { + rc = delete_request(&fctrl->i2c_data.config_settings); + if (rc) { + CAM_WARN(CAM_FLASH, + "Failed to delete NRT i2c_setting: %d", + rc); + return rc; + } + } + + return rc; +} + +static int cam_flash_construct_default_power_setting( + struct cam_sensor_power_ctrl_t *power_info) +{ + int rc = 0; + + power_info->power_setting_size = 1; + power_info->power_setting = + (struct cam_sensor_power_setting *) + kzalloc(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_setting) + return -ENOMEM; + + power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1; + power_info->power_setting[0].seq_val = CAM_V_CUSTOM1; + power_info->power_setting[0].config_val = 0; + power_info->power_setting[0].delay = 2; + + power_info->power_down_setting_size = 1; + power_info->power_down_setting = + (struct cam_sensor_power_setting *) + kzalloc(sizeof(struct cam_sensor_power_setting), + GFP_KERNEL); + if (!power_info->power_down_setting) { + rc = -ENOMEM; + goto free_power_settings; + } + + power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1; + 
power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1; + power_info->power_down_setting[0].config_val = 0; + + return rc; + +free_power_settings: + kfree(power_info->power_setting); + power_info->power_setting = NULL; + power_info->power_setting_size = 0; + return rc; +} + +int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable) +{ + int rc = 0; + + if (!(fctrl->switch_trigger)) { + CAM_ERR(CAM_FLASH, "Invalid argument"); + return -EINVAL; + } + + if (regulator_enable) { + rc = cam_flash_prepare(fctrl, true); + if (rc) { + CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + return rc; + } + } + + if (!regulator_enable) { + if ((fctrl->flash_state == CAM_FLASH_STATE_START) && + (fctrl->is_regulator_enabled == true)) { + rc = cam_flash_prepare(fctrl, false); + if (rc) + CAM_ERR(CAM_FLASH, + "Disable Regulator Failed rc: %d", rc); + } + } + + return rc; +} + +int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable) +{ + int rc = 0; + struct cam_hw_soc_info *soc_info = &fctrl->soc_info; + struct cam_sensor_power_ctrl_t *power_info = + &fctrl->power_info; + + if (!power_info || !soc_info) { + CAM_ERR(CAM_FLASH, "Power Info is NULL"); + return -EINVAL; + } + power_info->dev = soc_info->dev; + + if (regulator_enable && (fctrl->is_regulator_enabled == false)) { + if ((power_info->power_setting == NULL) && + (power_info->power_down_setting == NULL)) { + CAM_INFO(CAM_FLASH, + "Using default power settings"); + rc = cam_flash_construct_default_power_setting( + power_info); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "Construct default pwr setting failed rc: %d", + rc); + return rc; + } + } + + rc = cam_sensor_core_power_up(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "power up the core is failed:%d", + rc); + goto free_pwr_settings; + } + + rc = camera_io_init(&(fctrl->io_master_info)); + if (rc) { + CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc); + cam_sensor_util_power_down(power_info, soc_info); 
+ goto free_pwr_settings; + } + fctrl->is_regulator_enabled = true; + } else if ((!regulator_enable) && + (fctrl->is_regulator_enabled == true)) { + rc = cam_sensor_util_power_down(power_info, soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "power down the core is failed:%d", + rc); + return rc; + } + camera_io_release(&(fctrl->io_master_info)); + fctrl->is_regulator_enabled = false; + goto free_pwr_settings; + } + return rc; + +free_pwr_settings: + kfree(power_info->power_setting); + kfree(power_info->power_down_setting); + power_info->power_setting = NULL; + power_info->power_down_setting = NULL; + power_info->power_setting_size = 0; + power_info->power_down_setting_size = 0; + + return rc; +} + +int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id) { int rc = 0; int i = 0, j = 0; - struct cam_flash_ctrl *fctrl = NULL; int frame_offset = 0; - fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl); if (!fctrl) { CAM_ERR(CAM_FLASH, "Device data is NULL"); return -EINVAL; } - if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + if (type == FLUSH_ALL) { + cam_flash_off(fctrl); /* flush all requests*/ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { fctrl->per_frame[i].cmn_attr.request_id = 0; @@ -143,19 +310,105 @@ int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush) fctrl->per_frame[i].led_current_ma[j] = 0; } - rc = cam_flash_flush_nrt(fctrl); - if (rc) - CAM_ERR(CAM_FLASH, "NonRealTime flush error"); - } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + cam_flash_pmic_flush_nrt(fctrl); + } else if ((type == FLUSH_REQ) && (req_id != 0)) { /* flush request with req_id*/ - frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY; + frame_offset = req_id % MAX_PER_FRAME_ARRAY; fctrl->per_frame[frame_offset].cmn_attr.request_id = 0; fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid = false; fctrl->per_frame[frame_offset].cmn_attr.count = 0; for (i = 0; i < 
CAM_FLASH_MAX_LED_TRIGGERS; i++) fctrl->per_frame[frame_offset].led_current_ma[i] = 0; + } else if ((type == FLUSH_REQ) && (req_id == 0)) { + /* Handels NonRealTime usecase */ + cam_flash_pmic_flush_nrt(fctrl); + } else { + CAM_ERR(CAM_FLASH, "Invalid arguments"); + return -EINVAL; + } + + return rc; +} + +int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id) +{ + int rc = 0; + int i = 0; + uint32_t cancel_req_id_found = 0; + struct i2c_settings_array *i2c_set = NULL; + + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + if ((type == FLUSH_REQ) && (req_id == 0)) { + /* This setting will be called only when NonRealTime + * settings needs to clean. + */ + cam_flash_i2c_flush_nrt(fctrl); + } else { + /* All other usecase will be handle here */ + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + i2c_set = &(fctrl->i2c_data.per_frame[i]); + + if ((type == FLUSH_REQ) && + (i2c_set->request_id != req_id)) + continue; + + if (i2c_set->is_settings_valid == 1) { + rc = delete_request(i2c_set); + if (rc < 0) + CAM_ERR(CAM_FLASH, + "delete request: %lld rc: %d", + i2c_set->request_id, rc); + + if (type == FLUSH_REQ) { + cancel_req_id_found = 1; + break; + } + } + } } + + if ((type == FLUSH_REQ) && (req_id != 0) && + (!cancel_req_id_found)) + CAM_DBG(CAM_FLASH, + "Flush request id:%lld not found in the pending list", + req_id); + + return rc; +} + +int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush) +{ + int rc = 0; + struct cam_flash_ctrl *fctrl = NULL; + + fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl); + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Device data is NULL"); + return -EINVAL; + } + + mutex_lock(&fctrl->flash_mutex); + if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) { + rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + if (rc) { + CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc); + goto end; + } + } else if (flush->type == 
CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) { + rc = fctrl->func_tbl.flush_req(fctrl, + FLUSH_REQ, flush->req_id); + if (rc) { + CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc); + goto end; + } + } +end: + mutex_unlock(&fctrl->flash_mutex); return rc; } @@ -282,26 +535,51 @@ static int cam_flash_high( return rc; } -static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id) +static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + int i = 0, rc = 0; + uint64_t top = 0, del_req_id = 0; + + if (req_id != 0) { + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { + if ((req_id >= + fctrl->i2c_data.per_frame[i].request_id) && + (top < + fctrl->i2c_data.per_frame[i].request_id) && + (fctrl->i2c_data.per_frame[i].is_settings_valid + == 1)) { + del_req_id = top; + top = fctrl->i2c_data.per_frame[i].request_id; + } + } + + if (top < req_id) { + if ((((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) || + (((top % MAX_PER_FRAME_ARRAY) - (req_id % + MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX)) + del_req_id = req_id; + } + + if (!del_req_id) + return rc; + + CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu", + top, del_req_id); + } + fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id); + return 0; +} + +static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl, + uint64_t req_id) { int i = 0; struct cam_flash_frame_setting *flash_data = NULL; uint64_t top = 0, del_req_id = 0; - if (req_id == 0) { - flash_data = &fctrl->nrt_info; - if ((fctrl->nrt_info.cmn_attr.cmd_type == - CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) || - (fctrl->nrt_info.cmn_attr.cmd_type == - CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) { - flash_data->cmn_attr.is_settings_valid = false; - for (i = 0; i < flash_data->cmn_attr.count; i++) - flash_data->led_current_ma[i] = 0; - } else { - fctrl->flash_init_setting.cmn_attr.is_settings_valid - = false; - } - } else { + if (req_id != 0) { for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { flash_data = 
&fctrl->per_frame[i]; if (req_id >= flash_data->cmn_attr.request_id && @@ -333,27 +611,100 @@ static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id) CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu", top, del_req_id); + } - for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { - flash_data = &fctrl->per_frame[i]; - if ((del_req_id == - flash_data->cmn_attr.request_id) && - (flash_data->cmn_attr.is_settings_valid == 1)) { - CAM_DBG(CAM_FLASH, "Deleting request[%d] %llu", - i, flash_data->cmn_attr.request_id); - flash_data->cmn_attr.request_id = 0; - flash_data->cmn_attr.is_settings_valid = false; - flash_data->opcode = 0; - for (i = 0; i < flash_data->cmn_attr.count; i++) - flash_data->led_current_ma[i] = 0; + fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id); + return 0; +} + +static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl, + uint32_t *cmd_buf) +{ + int32_t rc = 0; + struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf; + + if (fctrl->io_master_info.master_type == CCI_MASTER) { + fctrl->io_master_info.cci_client->cci_i2c_master = + fctrl->cci_i2c_master; + fctrl->io_master_info.cci_client->i2c_freq_mode = + i2c_info->i2c_freq_mode; + fctrl->io_master_info.cci_client->sid = + i2c_info->slave_addr >> 1; + CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d", + i2c_info->slave_addr, i2c_info->i2c_freq_mode); + } else if (fctrl->io_master_info.master_type == I2C_MASTER) { + fctrl->io_master_info.client->addr = i2c_info->slave_addr; + CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr); + } else { + CAM_ERR(CAM_FLASH, "Invalid Master type: %d", + fctrl->io_master_info.master_type); + rc = -EINVAL; + } + + return rc; +} + +int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, + uint64_t req_id) +{ + struct i2c_settings_list *i2c_list; + struct i2c_settings_array *i2c_set = NULL; + int frame_offset = 0, rc = 0; + + if (req_id == 0) { + /* NonRealTime Init settings*/ + if 
(fctrl->i2c_data.init_settings.is_settings_valid == true) { + list_for_each_entry(i2c_list, + &(fctrl->i2c_data.init_settings.list_head), + list) { + rc = cam_sensor_util_i2c_apply_setting + (&(fctrl->io_master_info), i2c_list); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply init settings: %d", + rc); + return rc; + } + } + } + /* NonRealTime (Widget/RER/INIT_FIRE settings) */ + if (fctrl->i2c_data.config_settings.is_settings_valid == true) { + list_for_each_entry(i2c_list, + &(fctrl->i2c_data.config_settings.list_head), + list) { + rc = cam_sensor_util_i2c_apply_setting + (&(fctrl->io_master_info), i2c_list); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply NRT settings: %d", rc); + return rc; + } + } + } + } else { + /* RealTime */ + frame_offset = req_id % MAX_PER_FRAME_ARRAY; + i2c_set = &fctrl->i2c_data.per_frame[frame_offset]; + if ((i2c_set->is_settings_valid == true) && + (i2c_set->request_id == req_id)) { + list_for_each_entry(i2c_list, + &(i2c_set->list_head), list) { + rc = cam_sensor_util_i2c_apply_setting( + &(fctrl->io_master_info), i2c_list); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed to apply settings: %d", rc); + return rc; + } } } } - return 0; + cam_flash_i2c_delete_req(fctrl, req_id); + return rc; } -int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, +int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id) { int rc = 0, i = 0; @@ -455,7 +806,6 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, } else if (fctrl->nrt_info.cmn_attr.cmd_type == CAMERA_SENSOR_FLASH_CMD_TYPE_RER) { flash_data = &fctrl->nrt_info; - if (fctrl->flash_state != CAM_FLASH_STATE_START) { rc = cam_flash_off(fctrl); if (rc) { @@ -488,8 +838,7 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, rc = cam_flash_off(fctrl); if (rc) { CAM_ERR(CAM_FLASH, - "Flash off failed: %d", - rc); + "Flash off failed: %d", rc); continue; } fctrl->flash_state = CAM_FLASH_STATE_START; @@ -551,12 +900,321 @@ int 
cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, } nrt_del_req: - delete_req(fctrl, req_id); + cam_flash_pmic_delete_req(fctrl, req_id); apply_setting_err: return rc; } -int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) +int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg) +{ + int rc = 0, i = 0; + uint64_t generic_ptr; + uint32_t total_cmd_buf_in_bytes = 0; + uint32_t processed_cmd_buf_in_bytes = 0; + uint16_t cmd_length_in_bytes = 0; + uint32_t *cmd_buf = NULL; + uint32_t *offset = NULL; + uint32_t frm_offset = 0; + size_t len_of_buffer; + struct cam_flash_init *flash_init = NULL; + struct common_header *cmn_hdr = NULL; + struct cam_control *ioctl_ctrl = NULL; + struct cam_packet *csl_packet = NULL; + struct cam_cmd_buf_desc *cmd_desc = NULL; + struct cam_config_dev_cmd config; + struct cam_req_mgr_add_request add_req; + struct i2c_data_settings *i2c_data = NULL; + struct i2c_settings_array *i2c_reg_settings = NULL; + struct cam_sensor_power_ctrl_t *power_info = NULL; + + if (!fctrl || !arg) { + CAM_ERR(CAM_FLASH, "fctrl/arg is NULL"); + return -EINVAL; + } + /* getting CSL Packet */ + ioctl_ctrl = (struct cam_control *)arg; + + if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle, + sizeof(config))) { + CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed"); + return -EFAULT; + } + + rc = cam_mem_get_cpu_buf(config.packet_handle, + (uint64_t *)&generic_ptr, &len_of_buffer); + if (rc) { + CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc); + return rc; + } + + if (config.offset > len_of_buffer) { + CAM_ERR(CAM_FLASH, + "offset is out of bounds: offset: %lld len: %zu", + config.offset, len_of_buffer); + return -EINVAL; + } + + /* Add offset to the flash csl header */ + csl_packet = (struct cam_packet *)(generic_ptr + config.offset); + switch (csl_packet->header.op_code & 0xFFFFFF) { + case CAM_FLASH_PACKET_OPCODE_INIT: { + /* INIT packet*/ + offset = (uint32_t *)((uint8_t *)&csl_packet->payload + + 
csl_packet->cmd_buf_offset); + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + + /* Loop through multiple command buffers */ + for (i = 1; i < csl_packet->num_cmd_buf; i++) { + total_cmd_buf_in_bytes = cmd_desc[i].length; + processed_cmd_buf_in_bytes = 0; + if (!total_cmd_buf_in_bytes) + continue; + rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle, + (uint64_t *)&generic_ptr, &len_of_buffer); + if (rc < 0) { + CAM_ERR(CAM_FLASH, "Failed to get cpu buf"); + return rc; + } + cmd_buf = (uint32_t *)generic_ptr; + if (!cmd_buf) { + CAM_ERR(CAM_FLASH, "invalid cmd buf"); + return -EINVAL; + } + cmd_buf += cmd_desc[i].offset / sizeof(uint32_t); + cmn_hdr = (struct common_header *)cmd_buf; + + /* Loop through cmd formats in one cmd buffer */ + CAM_DBG(CAM_FLASH, + "command Type: %d,Processed: %d,Total: %d", + cmn_hdr->cmd_type, processed_cmd_buf_in_bytes, + total_cmd_buf_in_bytes); + switch (cmn_hdr->cmd_type) { + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: + flash_init = (struct cam_flash_init *)cmd_buf; + fctrl->flash_type = flash_init->flash_type; + cmd_length_in_bytes = + sizeof(struct cam_flash_init); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); + break; + case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: + rc = cam_flash_slaveInfo_pkt_parser( + fctrl, cmd_buf); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "Failed parsing slave info: rc: %d", + rc); + return rc; + } + cmd_length_in_bytes = + sizeof(struct cam_cmd_i2c_info); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); + break; + case CAMERA_SENSOR_CMD_TYPE_PWR_UP: + case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN: + CAM_DBG(CAM_FLASH, + "Received power settings"); + cmd_length_in_bytes = + total_cmd_buf_in_bytes; + rc = cam_sensor_update_power_settings( + cmd_buf, + total_cmd_buf_in_bytes, + &fctrl->power_info); + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); 
+ if (rc) { + CAM_ERR(CAM_FLASH, + "Failed update power settings"); + return rc; + } + break; + default: + CAM_DBG(CAM_FLASH, + "Received initSettings"); + i2c_data = &(fctrl->i2c_data); + i2c_reg_settings = + &fctrl->i2c_data.init_settings; + + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = 1; + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, + &cmd_desc[i], 1); + if (rc < 0) { + CAM_ERR(CAM_FLASH, + "pkt parsing failed: %d", rc); + return rc; + } + cmd_length_in_bytes = + cmd_desc[i].length; + processed_cmd_buf_in_bytes += + cmd_length_in_bytes; + cmd_buf += cmd_length_in_bytes/ + sizeof(uint32_t); + + break; + } + } + power_info = &fctrl->power_info; + if (!power_info) { + CAM_ERR(CAM_FLASH, "Power_info is NULL"); + return -EINVAL; + } + + /* Parse and fill vreg params for power up settings */ + rc = msm_camera_fill_vreg_params(&fctrl->soc_info, + power_info->power_setting, + power_info->power_setting_size); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed to fill vreg params for power up rc:%d", + rc); + return rc; + } + + /* Parse and fill vreg params for power down settings*/ + rc = msm_camera_fill_vreg_params( + &fctrl->soc_info, + power_info->power_down_setting, + power_info->power_down_setting_size); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed to fill vreg params power down rc:%d", + rc); + return rc; + } + + rc = fctrl->func_tbl.power_ops(fctrl, true); + if (rc) { + CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + return rc; + } + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) { + CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc); + return rc; + } + + fctrl->flash_state = CAM_FLASH_STATE_CONFIG; + break; + } + case CAM_FLASH_PACKET_OPCODE_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload + + csl_packet->cmd_buf_offset); + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + /* add support for handling i2c_data*/ + i2c_reg_settings = + 
&fctrl->i2c_data.per_frame[frm_offset]; + if (i2c_reg_settings->is_settings_valid == true) { + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = false; + goto update_req_mgr; + } + i2c_reg_settings->is_settings_valid = true; + i2c_reg_settings->request_id = + csl_packet->header.request_id; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in parsing i2c packets"); + return rc; + } + break; + } + case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: { + offset = (uint32_t *)((uint8_t *)&csl_packet->payload + + csl_packet->cmd_buf_offset); + + /* add support for handling i2c_data*/ + i2c_reg_settings = &fctrl->i2c_data.config_settings; + if (i2c_reg_settings->is_settings_valid == true) { + i2c_reg_settings->request_id = 0; + i2c_reg_settings->is_settings_valid = false; + + rc = delete_request(i2c_reg_settings); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in Deleting the err: %d", rc); + return rc; + } + } + i2c_reg_settings->is_settings_valid = true; + i2c_reg_settings->request_id = + csl_packet->header.request_id; + cmd_desc = (struct cam_cmd_buf_desc *)(offset); + rc = cam_sensor_i2c_command_parser( + &fctrl->io_master_info, + i2c_reg_settings, cmd_desc, 1); + if (rc) { + CAM_ERR(CAM_FLASH, + "Failed in parsing i2c NRT packets"); + return rc; + } + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, + "Apply setting failed: %d", rc); + return rc; + } + case CAM_PKT_NOP_OPCODE: { + if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || + (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) { + CAM_WARN(CAM_FLASH, + "Rxed NOP packets without linking"); + frm_offset = csl_packet->header.request_id % + MAX_PER_FRAME_ARRAY; + fctrl->i2c_data.per_frame[frm_offset].is_settings_valid + = false; + return 0; + } + + CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u", + 
csl_packet->header.request_id); + goto update_req_mgr; + } + default: + CAM_ERR(CAM_FLASH, "Wrong Opcode : %d", + (csl_packet->header.op_code & 0xFFFFFF)); + return -EINVAL; + } +update_req_mgr: + if (((csl_packet->header.op_code & 0xFFFFF) == + CAM_PKT_NOP_OPCODE) || + ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS)) { + add_req.link_hdl = fctrl->bridge_intf.link_hdl; + add_req.req_id = csl_packet->header.request_id; + add_req.dev_hdl = fctrl->bridge_intf.device_hdl; + + if ((csl_packet->header.op_code & 0xFFFFF) == + CAM_FLASH_PACKET_OPCODE_SET_OPS) + add_req.skip_before_applying = 1; + else + add_req.skip_before_applying = 0; + + if (fctrl->bridge_intf.crm_cb && + fctrl->bridge_intf.crm_cb->add_req) + fctrl->bridge_intf.crm_cb->add_req(&add_req); + CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id); + } + return rc; +} + +int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg) { int rc = 0, i = 0; uint64_t generic_ptr; @@ -617,8 +1275,6 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) /* INIT packet*/ offset = (uint32_t *)((uint8_t *)&csl_packet->payload + csl_packet->cmd_buf_offset); - fctrl->flash_init_setting.cmn_attr.request_id = 0; - fctrl->flash_init_setting.cmn_attr.is_settings_valid = true; cmd_desc = (struct cam_cmd_buf_desc *)(offset); rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, (uint64_t *)&generic_ptr, &len_of_buffer); @@ -627,40 +1283,53 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) cam_flash_info = (struct cam_flash_init *)cmd_buf; switch (cam_flash_info->cmd_type) { - case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: { + CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED"); + fctrl->flash_init_setting.cmn_attr.request_id = 0; + fctrl->flash_init_setting.cmn_attr.is_settings_valid = + true; fctrl->flash_type = cam_flash_info->flash_type; fctrl->is_regulator_enabled = false; fctrl->nrt_info.cmn_attr.cmd_type = 
CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO; + + rc = fctrl->func_tbl.power_ops(fctrl, true); + if (rc) { + CAM_ERR(CAM_FLASH, + "Enable Regulator Failed rc = %d", rc); + return rc; + } + fctrl->flash_state = CAM_FLASH_STATE_CONFIG; break; - case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: - CAM_DBG(CAM_FLASH, "INIT Fire Operation"); - flash_operation_info = - (struct cam_flash_set_on_off *) cmd_buf; - fctrl->nrt_info.cmn_attr.count = - flash_operation_info->count; - fctrl->nrt_info.cmn_attr.request_id = 0; - fctrl->nrt_info.opcode = - flash_operation_info->opcode; - fctrl->nrt_info.cmn_attr.cmd_type = - CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE; - for (i = 0; - i < flash_operation_info->count; i++) - fctrl->nrt_info.led_current_ma[i] = - flash_operation_info->led_current_ma[i]; + } + case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: { + CAM_DBG(CAM_FLASH, "INIT_FIRE Operation"); - mutex_lock(&fctrl->flash_wq_mutex); - rc = cam_flash_apply_setting(fctrl, 0); - if (rc) - CAM_ERR(CAM_FLASH, - "Apply setting failed: %d", - rc); - mutex_unlock(&fctrl->flash_wq_mutex); - fctrl->flash_state = - CAM_FLASH_STATE_CONFIG; + flash_operation_info = + (struct cam_flash_set_on_off *) cmd_buf; + fctrl->nrt_info.cmn_attr.count = + flash_operation_info->count; + fctrl->nrt_info.cmn_attr.request_id = 0; + fctrl->nrt_info.opcode = + flash_operation_info->opcode; + fctrl->nrt_info.cmn_attr.cmd_type = + CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE; + for (i = 0; + i < flash_operation_info->count; i++) + fctrl->nrt_info.led_current_ma[i] = + flash_operation_info->led_current_ma[i]; + + rc = fctrl->func_tbl.apply_setting(fctrl, 0); + if (rc) + CAM_ERR(CAM_FLASH, + "Apply setting failed: %d", + rc); + + fctrl->flash_state = CAM_FLASH_STATE_CONFIG; break; + } default: CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d", cam_flash_info->cmd_type); @@ -698,7 +1367,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) switch (cmn_hdr->cmd_type) { case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: { CAM_DBG(CAM_FLASH, - 
"CAMERA_FLASH_CMD_TYPE_OPS case called"); + "CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called"); if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) { @@ -722,8 +1391,8 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) for (i = 0; i < flash_operation_info->count; i++) flash_data->led_current_ma[i] = flash_operation_info->led_current_ma[i]; - } - break; + } + break; default: CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d", cmn_hdr->cmd_type); @@ -759,12 +1428,10 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) fctrl->nrt_info.led_current_ma[i] = flash_operation_info->led_current_ma[i]; - mutex_lock(&fctrl->flash_wq_mutex); - rc = cam_flash_apply_setting(fctrl, 0); + rc = fctrl->func_tbl.apply_setting(fctrl, 0); if (rc) CAM_ERR(CAM_FLASH, "Apply setting failed: %d", rc); - mutex_unlock(&fctrl->flash_wq_mutex); return rc; } case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: { @@ -814,12 +1481,10 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) flash_rer_info->led_current_ma[i]; - mutex_lock(&fctrl->flash_wq_mutex); - rc = cam_flash_apply_setting(fctrl, 0); + rc = fctrl->func_tbl.apply_setting(fctrl, 0); if (rc) CAM_ERR(CAM_FLASH, "apply_setting failed: %d", rc); - mutex_unlock(&fctrl->flash_wq_mutex); return rc; } default: @@ -827,7 +1492,6 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) cmn_hdr->cmd_type); return -EINVAL; } - break; } case CAM_PKT_NOP_OPCODE: { @@ -845,7 +1509,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false; fctrl->per_frame[frm_offset].cmn_attr.request_id = 0; fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE; - CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u", + CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu", csl_packet->header.request_id); goto update_req_mgr; } @@ -899,7 +1563,7 @@ int cam_flash_establish_link(struct 
cam_req_mgr_core_dev_link_setup *link) CAM_ERR(CAM_FLASH, " Device data is NULL"); return -EINVAL; } - + mutex_lock(&fctrl->flash_mutex); if (link->link_enable) { fctrl->bridge_intf.link_hdl = link->link_hdl; fctrl->bridge_intf.crm_cb = link->crm_cb; @@ -907,43 +1571,11 @@ int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link) fctrl->bridge_intf.link_hdl = -1; fctrl->bridge_intf.crm_cb = NULL; } + mutex_unlock(&fctrl->flash_mutex); return 0; } - -int cam_flash_stop_dev(struct cam_flash_ctrl *fctrl) -{ - int rc = 0, i, j; - - cam_flash_off(fctrl); - - for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { - fctrl->per_frame[i].cmn_attr.request_id = 0; - fctrl->per_frame[i].cmn_attr.is_settings_valid = false; - fctrl->per_frame[i].cmn_attr.count = 0; - for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++) - fctrl->per_frame[i].led_current_ma[j] = 0; - } - - rc = cam_flash_flush_nrt(fctrl); - if (rc) { - CAM_ERR(CAM_FLASH, - "NonRealTime Dev flush failed rc: %d", rc); - return rc; - } - - if ((fctrl->flash_state == CAM_FLASH_STATE_START) && - (fctrl->is_regulator_enabled == true)) { - rc = cam_flash_prepare(fctrl, false); - if (rc) - CAM_ERR(CAM_FLASH, "Disable Regulator Failed rc: %d", - rc); - } - - return rc; -} - int cam_flash_release_dev(struct cam_flash_ctrl *fctrl) { int rc = 0; @@ -971,9 +1603,13 @@ void cam_flash_shutdown(struct cam_flash_ctrl *fctrl) if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) || (fctrl->flash_state == CAM_FLASH_STATE_START)) { - rc = cam_flash_stop_dev(fctrl); + mutex_lock(&(fctrl->flash_mutex)); + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + mutex_unlock(&(fctrl->flash_mutex)); + rc = fctrl->func_tbl.power_ops(fctrl, false); if (rc) - CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc); + CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d", + rc); } rc = cam_flash_release_dev(fctrl); @@ -997,12 +1633,12 @@ int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply) return -EINVAL; } - mutex_lock(&fctrl->flash_wq_mutex); - rc = 
cam_flash_apply_setting(fctrl, apply->request_id); + mutex_lock(&fctrl->flash_mutex); + rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id); if (rc) CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d", rc); - mutex_unlock(&fctrl->flash_wq_mutex); + mutex_unlock(&fctrl->flash_mutex); return rc; } diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h index 1f527b3413196b8668936a0df909265f712c1649..5b886ef27bfe3b2901afa4188c45266e2865561c 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h @@ -15,20 +15,12 @@ #include #include "cam_flash_dev.h" -#include "cam_sync_api.h" -#include "cam_mem_mgr_api.h" -int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg); int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info); int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link); -int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id); int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply); int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data); int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush); -int cam_flash_off(struct cam_flash_ctrl *fctrl); -int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl, - bool regulator_enable); -void cam_flash_shutdown(struct cam_flash_ctrl *flash_ctrl); -int cam_flash_stop_dev(struct cam_flash_ctrl *flash_ctrl); -int cam_flash_release_dev(struct cam_flash_ctrl *fctrl); + + #endif /*_CAM_FLASH_CORE_H_*/ diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c index f8be3de85708b47f7a8e51320d6c326c373fadf9..199b50502cad026c72765d4344beab48e4a0ee3c 100644 --- 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c @@ -93,7 +93,7 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) || (fctrl->flash_state == CAM_FLASH_STATE_START)) { CAM_WARN(CAM_FLASH, - "Cannot apply Release dev: Prev state:%d", + "Wrong state for Release dev: Prev state:%d", fctrl->flash_state); } @@ -106,11 +106,18 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, rc = -EINVAL; goto release_mutex; } - rc = cam_flash_release_dev(fctrl); - if (rc) - CAM_ERR(CAM_FLASH, - "Failed in destroying the device Handle rc= %d", - rc); + + if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) || + (fctrl->flash_state == CAM_FLASH_STATE_START)) + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); + + if (cam_flash_release_dev(fctrl)) + CAM_WARN(CAM_FLASH, + "Failed in destroying the device Handle"); + + if (fctrl->func_tbl.power_ops(fctrl, false)) + CAM_WARN(CAM_FLASH, "Power Down Failed"); + fctrl->flash_state = CAM_FLASH_STATE_INIT; break; } @@ -149,15 +156,6 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, goto release_mutex; } - if (fctrl->is_regulator_enabled == false) { - rc = cam_flash_prepare(fctrl, true); - if (rc) { - CAM_ERR(CAM_FLASH, - "Enable Regulator Failed rc = %d", rc); - goto release_mutex; - } - } - fctrl->flash_state = CAM_FLASH_STATE_START; break; } @@ -171,18 +169,13 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, goto release_mutex; } - rc = cam_flash_stop_dev(fctrl); - if (rc) { - CAM_ERR(CAM_FLASH, "Stop Dev Failed rc = %d", - rc); - goto release_mutex; - } + fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0); fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE; break; } case CAM_CONFIG_DEV: { CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV"); - rc = cam_flash_parser(fctrl, arg); + rc = fctrl->func_tbl.parser(fctrl, arg); if (rc) { 
CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc); goto release_mutex; @@ -199,6 +192,35 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl, return rc; } +static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl) +{ + /* Validate input parameters */ + if (!fctrl) { + CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK", + fctrl); + return -EINVAL; + } + + CAM_DBG(CAM_FLASH, + "master_type: %d", fctrl->io_master_info.master_type); + /* Initialize cci_client */ + if (fctrl->io_master_info.master_type == CCI_MASTER) { + fctrl->io_master_info.cci_client = kzalloc(sizeof( + struct cam_sensor_cci_client), GFP_KERNEL); + if (!(fctrl->io_master_info.cci_client)) + return -ENOMEM; + } else if (fctrl->io_master_info.master_type == I2C_MASTER) { + if (!(fctrl->io_master_info.client)) + return -EINVAL; + } else { + CAM_ERR(CAM_FLASH, + "Invalid master / Master type Not supported"); + return -EINVAL; + } + + return 0; +} + static const struct of_device_id cam_flash_dt_match[] = { {.compatible = "qcom,camera-flash", .data = NULL}, {} @@ -289,20 +311,36 @@ static int cam_flash_platform_remove(struct platform_device *pdev) return 0; } +static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client) +{ + int32_t rc = 0; + struct cam_flash_ctrl *fctrl = i2c_get_clientdata(client); + /* Handle I2C Devices */ + if (!fctrl) { + CAM_ERR(CAM_FLASH, "Flash device is NULL"); + return -EINVAL; + } + /*Free Allocated Mem */ + kfree(fctrl->i2c_data.per_frame); + fctrl->i2c_data.per_frame = NULL; + kfree(fctrl); + return rc; +} + static int cam_flash_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { - struct cam_flash_ctrl *flash_ctrl = + struct cam_flash_ctrl *fctrl = v4l2_get_subdevdata(sd); - if (!flash_ctrl) { + if (!fctrl) { CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL"); return -EINVAL; } - mutex_lock(&flash_ctrl->flash_mutex); - cam_flash_shutdown(flash_ctrl); - mutex_unlock(&flash_ctrl->flash_mutex); + 
mutex_lock(&fctrl->flash_mutex); + cam_flash_shutdown(fctrl); + mutex_unlock(&fctrl->flash_mutex); return 0; } @@ -322,10 +360,30 @@ static const struct v4l2_subdev_internal_ops cam_flash_internal_ops = { .close = cam_flash_subdev_close, }; +static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl) +{ + int rc = 0; + + fctrl->v4l2_dev_str.internal_ops = + &cam_flash_internal_ops; + fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops; + fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME; + fctrl->v4l2_dev_str.sd_flags = + V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; + fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE; + fctrl->v4l2_dev_str.token = fctrl; + + rc = cam_register_subdev(&(fctrl->v4l2_dev_str)); + if (rc) + CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc); + + return rc; +} + static int32_t cam_flash_platform_probe(struct platform_device *pdev) { - int32_t rc = 0; - struct cam_flash_ctrl *flash_ctrl = NULL; + int32_t rc = 0, i = 0; + struct cam_flash_ctrl *fctrl = NULL; CAM_DBG(CAM_FLASH, "Enter"); if (!pdev->dev.of_node) { @@ -333,53 +391,181 @@ static int32_t cam_flash_platform_probe(struct platform_device *pdev) return -EINVAL; } - flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL); - if (!flash_ctrl) + fctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL); + if (!fctrl) return -ENOMEM; - flash_ctrl->pdev = pdev; - flash_ctrl->soc_info.pdev = pdev; - flash_ctrl->soc_info.dev = &pdev->dev; - flash_ctrl->soc_info.dev_name = pdev->name; + fctrl->pdev = pdev; + fctrl->soc_info.pdev = pdev; + fctrl->soc_info.dev = &pdev->dev; + fctrl->soc_info.dev_name = pdev->name; + + platform_set_drvdata(pdev, fctrl); - rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info); + rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info); if (rc) { CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc); - kfree(flash_ctrl); + kfree(fctrl); return -EINVAL; } - flash_ctrl->v4l2_dev_str.internal_ops = - &cam_flash_internal_ops; 
- flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops; - flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME; - flash_ctrl->v4l2_dev_str.sd_flags = - V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; - flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE; - flash_ctrl->v4l2_dev_str.token = flash_ctrl; + if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) { + /* Get CCI master */ + rc = of_property_read_u32(pdev->dev.of_node, "cci-master", + &fctrl->cci_i2c_master); + CAM_DBG(CAM_FLASH, "cci-master %d, rc %d", + fctrl->cci_i2c_master, rc); + if (rc < 0) { + /* Set default master 0 */ + fctrl->cci_i2c_master = MASTER_0; + rc = 0; + } + + fctrl->io_master_info.master_type = CCI_MASTER; + rc = cam_flash_init_default_params(fctrl); + if (rc) { + CAM_ERR(CAM_FLASH, + "failed: cam_flash_init_default_params rc %d", + rc); + return rc; + } - rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str)); + fctrl->i2c_data.per_frame = (struct i2c_settings_array *) + kzalloc(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (fctrl->i2c_data.per_frame == NULL) { + CAM_ERR(CAM_FLASH, "No Memory"); + rc = -ENOMEM; + goto free_cci_resource; + } + + INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head)); + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD( + &(fctrl->i2c_data.per_frame[i].list_head)); + + fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting; + fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops; + fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request; + } else { + /* PMIC Flash */ + fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting; + fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops; + fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request; + } + + rc = cam_flash_init_subdev(fctrl); if (rc) { - CAM_ERR(CAM_FLASH, 
"Fail to create subdev with %d", rc); - goto free_resource; + if (fctrl->io_master_info.cci_client != NULL) + goto free_cci_resource; + else + goto free_resource; } - flash_ctrl->bridge_intf.device_hdl = -1; - flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info; - flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link; - flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request; - flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request; - platform_set_drvdata(pdev, flash_ctrl); - v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl); + fctrl->bridge_intf.device_hdl = -1; + fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info; + fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link; + fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request; + fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request; - mutex_init(&(flash_ctrl->flash_mutex)); - mutex_init(&(flash_ctrl->flash_wq_mutex)); + mutex_init(&(fctrl->flash_mutex)); - flash_ctrl->flash_state = CAM_FLASH_STATE_INIT; + fctrl->flash_state = CAM_FLASH_STATE_INIT; CAM_DBG(CAM_FLASH, "Probe success"); return rc; + +free_cci_resource: + kfree(fctrl->io_master_info.cci_client); + fctrl->io_master_info.cci_client = NULL; free_resource: - kfree(flash_ctrl); + kfree(fctrl->i2c_data.per_frame); + kfree(fctrl->soc_info.soc_private); + cam_soc_util_release_platform_resource(&fctrl->soc_info); + fctrl->i2c_data.per_frame = NULL; + fctrl->soc_info.soc_private = NULL; + kfree(fctrl); + fctrl = NULL; + return rc; +} + +static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int32_t rc = 0, i = 0; + struct cam_flash_ctrl *fctrl; + + if (client == NULL || id == NULL) { + CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK", + client, id); + return -EINVAL; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed", + client->name); + 
return -EFAULT; + } + + /* Create sensor control structure */ + fctrl = kzalloc(sizeof(*fctrl), GFP_KERNEL); + if (!fctrl) + return -ENOMEM; + + i2c_set_clientdata(client, fctrl); + + fctrl->io_master_info.client = client; + fctrl->soc_info.dev = &client->dev; + fctrl->soc_info.dev_name = client->name; + fctrl->io_master_info.master_type = I2C_MASTER; + + rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "failed: cam_sensor_parse_dt rc %d", rc); + goto free_ctrl; + } + + rc = cam_flash_init_subdev(fctrl); + if (rc) + goto free_ctrl; + + fctrl->i2c_data.per_frame = + (struct i2c_settings_array *) + kzalloc(sizeof(struct i2c_settings_array) * + MAX_PER_FRAME_ARRAY, GFP_KERNEL); + if (fctrl->i2c_data.per_frame == NULL) { + rc = -ENOMEM; + goto unreg_subdev; + } + + INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head)); + INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head)); + for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) + INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head)); + + fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser; + fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting; + fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops; + fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request; + + fctrl->bridge_intf.device_hdl = -1; + fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info; + fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link; + fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request; + fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request; + + mutex_init(&(fctrl->flash_mutex)); + fctrl->flash_state = CAM_FLASH_STATE_INIT; + + return rc; + +unreg_subdev: + cam_unregister_subdev(&(fctrl->v4l2_dev_str)); +free_ctrl: + kfree(fctrl); + fctrl = NULL; return rc; } @@ -396,20 +582,40 @@ static struct platform_driver cam_flash_platform_driver = { }, }; -static int __init cam_flash_init_module(void) +static const struct i2c_device_id i2c_id[] = { + {FLASH_DRIVER_I2C, 
(kernel_ulong_t)NULL}, + { } +}; + +static struct i2c_driver cam_flash_i2c_driver = { + .id_table = i2c_id, + .probe = cam_flash_i2c_driver_probe, + .remove = cam_flash_i2c_driver_remove, + .driver = { + .name = FLASH_DRIVER_I2C, + }, +}; + +static int32_t __init cam_flash_init_module(void) { int32_t rc = 0; rc = platform_driver_register(&cam_flash_platform_driver); - if (rc) - CAM_ERR(CAM_FLASH, "platform probe for flash failed"); + if (rc == 0) { + CAM_DBG(CAM_FLASH, "platform probe success"); + return 0; + } + rc = i2c_add_driver(&cam_flash_i2c_driver); + if (rc) + CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc); return rc; } static void __exit cam_flash_exit_module(void) { platform_driver_unregister(&cam_flash_platform_driver); + i2c_del_driver(&cam_flash_i2c_driver); } module_init(cam_flash_init_module); diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h index 0e8d4ee7c611fd6bea87d6bd45ff895c4471baa8..3c8fd3e2d33ec85f42defc0fe002a568a69ea917 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h @@ -34,15 +34,21 @@ #include "cam_sensor_cmn_header.h" #include "cam_soc_util.h" #include "cam_debug_util.h" +#include "cam_sensor_io.h" +#include "cam_flash_core.h" #define CAMX_FLASH_DEV_NAME "cam-flash-dev" #define CAM_FLASH_PIPELINE_DELAY 1 +#define FLASH_DRIVER_I2C "i2c_flash" + #define CAM_FLASH_PACKET_OPCODE_INIT 0 #define CAM_FLASH_PACKET_OPCODE_SET_OPS 1 #define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2 +struct cam_flash_ctrl; + enum cam_flash_switch_trigger_ops { LED_SWITCH_OFF = 0, LED_SWITCH_ON, @@ -55,6 +61,12 @@ enum cam_flash_state { CAM_FLASH_STATE_START, }; +enum cam_flash_flush_type { + FLUSH_ALL = 0, + FLUSH_REQ, + FLUSH_MAX, +}; + /** * struct cam_flash_intf_params * @device_hdl : Device Handle @@ 
-139,6 +151,14 @@ struct cam_flash_private_soc { bool is_wled_flash; }; +struct cam_flash_func_tbl { + int (*parser)(struct cam_flash_ctrl *fctrl, void *arg); + int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id); + int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable); + int (*flush_req)(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id); +}; + /** * struct cam_flash_ctrl * @soc_info : Soc related information @@ -153,32 +173,57 @@ struct cam_flash_private_soc { * @flash_num_sources : Number of flash sources * @torch_num_source : Number of torch sources * @flash_mutex : Mutex for flash operations - * @flash_wq_mutex : Mutex for flash apply setting - * @flash_state : Current flash state (LOW/OFF/ON/INIT) + * @flash_state : Current flash state (LOW/OFF/ON/INIT) * @flash_type : Flash types (PMIC/I2C/GPIO) * @is_regulator_enable : Regulator disable/enable notifier + * @func_tbl : Function table for different HW + * (e.g. i2c/pmic/gpio) * @flash_trigger : Flash trigger ptr * @torch_trigger : Torch trigger ptr + * @cci_i2c_master : I2C structure + * @io_master_info : Information about the communication master + * @i2c_data : I2C register settings */ struct cam_flash_ctrl { - struct cam_hw_soc_info soc_info; - struct platform_device *pdev; - struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY]; - struct cam_flash_frame_setting nrt_info; - struct device_node *of_node; - struct cam_subdev v4l2_dev_str; - struct cam_flash_intf_params bridge_intf; - struct cam_flash_init_packet flash_init_setting; - struct led_trigger *switch_trigger; - uint32_t flash_num_sources; - uint32_t torch_num_sources; - struct mutex flash_mutex; - struct mutex flash_wq_mutex; - enum cam_flash_state flash_state; - uint8_t flash_type; - bool is_regulator_enabled; + struct cam_hw_soc_info soc_info; + struct platform_device *pdev; + struct cam_sensor_power_ctrl_t power_info; + struct cam_flash_frame_setting 
per_frame[MAX_PER_FRAME_ARRAY]; + struct cam_flash_frame_setting nrt_info; + struct device_node *of_node; + struct cam_subdev v4l2_dev_str; + struct cam_flash_intf_params bridge_intf; + struct cam_flash_init_packet flash_init_setting; + struct led_trigger *switch_trigger; + uint32_t flash_num_sources; + uint32_t torch_num_sources; + struct mutex flash_mutex; + enum cam_flash_state flash_state; + uint8_t flash_type; + bool is_regulator_enabled; + struct cam_flash_func_tbl func_tbl; struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS]; struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS]; +/* I2C related setting */ + enum cci_i2c_master_t cci_i2c_master; + struct camera_io_master io_master_info; + struct i2c_data_settings i2c_data; }; +int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg); +int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg); +int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id); +int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id); +int cam_flash_off(struct cam_flash_ctrl *fctrl); +int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable); +int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl, + bool regulator_enable); +int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type type, uint64_t req_id); +int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl, + enum cam_flash_flush_type, uint64_t req_id); +void cam_flash_shutdown(struct cam_flash_ctrl *fctrl); +int cam_flash_release_dev(struct cam_flash_ctrl *fctrl); + #endif /*_CAM_FLASH_DEV_H_*/ diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c index d5f583a72f48a628f843fa6ab98c0700bd28e748..2e3d83fa5d4f62357447844cc4243192e1aae770 100644 --- 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c @@ -227,32 +227,31 @@ int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl, return -EINVAL; } - of_node = fctrl->pdev->dev.of_node; - - rc = cam_soc_util_get_dt_properties(soc_info); - if (rc < 0) { - CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc); - return rc; - } - soc_info->soc_private = kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL); if (!soc_info->soc_private) { rc = -ENOMEM; goto release_soc_res; } + of_node = fctrl->pdev->dev.of_node; + + rc = cam_soc_util_get_dt_properties(soc_info); + if (rc) { + CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc); + goto free_soc_private; + } rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private); - if (rc < 0) { + if (rc) { CAM_ERR(CAM_FLASH, "cam_flash_get_pmic_source_info failed rc %d", rc); goto free_soc_private; } - return rc; free_soc_private: kfree(soc_info->soc_private); + soc_info->soc_private = NULL; release_soc_res: cam_soc_util_release_platform_resource(soc_info); return rc; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c index a46e96f1f241a5fd1b282a44a393131471b5a428..5b0160f43b128fba6e75cb7a8dc55206db8e4b1d 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c @@ -193,7 +193,7 @@ static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl) return -EINVAL; } - rc = msm_camera_power_down(power_info, soc_info); + rc = cam_sensor_util_power_down(power_info, soc_info); if (rc) { CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc); return rc; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c 
b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c index a2431be711766226db02a99c58c00ac7cace8d8d..4aae41e70fe1f3cc0113c8fd0a19c58c7f8e42b8 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c @@ -62,30 +62,12 @@ static void cam_sensor_release_stream_rsc( } } -static void cam_sensor_release_resource( +static void cam_sensor_release_per_frame_resource( struct cam_sensor_ctrl_t *s_ctrl) { struct i2c_settings_array *i2c_set = NULL; int i, rc; - i2c_set = &(s_ctrl->i2c_data.init_settings); - if (i2c_set->is_settings_valid == 1) { - i2c_set->is_settings_valid = -1; - rc = delete_request(i2c_set); - if (rc < 0) - CAM_ERR(CAM_SENSOR, - "failed while deleting Init settings"); - } - - i2c_set = &(s_ctrl->i2c_data.config_settings); - if (i2c_set->is_settings_valid == 1) { - i2c_set->is_settings_valid = -1; - rc = delete_request(i2c_set); - if (rc < 0) - CAM_ERR(CAM_SENSOR, - "failed while deleting Res settings"); - } - if (s_ctrl->i2c_data.per_frame != NULL) { for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) { i2c_set = &(s_ctrl->i2c_data.per_frame[i]); @@ -503,10 +485,9 @@ void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl) (s_ctrl->is_probe_succeed == 0)) return; - cam_sensor_release_resource(s_ctrl); cam_sensor_release_stream_rsc(s_ctrl); - if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE) - cam_sensor_power_down(s_ctrl); + cam_sensor_release_per_frame_resource(s_ctrl); + cam_sensor_power_down(s_ctrl); rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl); if (rc < 0) @@ -731,7 +712,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, goto release_mutex; } - cam_sensor_release_resource(s_ctrl); + cam_sensor_release_per_frame_resource(s_ctrl); cam_sensor_release_stream_rsc(s_ctrl); if (s_ctrl->bridge_intf.device_hdl == -1) { CAM_ERR(CAM_SENSOR, @@ -816,7 +797,7 @@ int32_t 
cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, } } - cam_sensor_release_resource(s_ctrl); + cam_sensor_release_per_frame_resource(s_ctrl); s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE; CAM_INFO(CAM_SENSOR, "CAM_STOP_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x", @@ -918,6 +899,8 @@ int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link) CAM_ERR(CAM_SENSOR, "Device data is NULL"); return -EINVAL; } + + mutex_lock(&s_ctrl->cam_sensor_mutex); if (link->link_enable) { s_ctrl->bridge_intf.link_hdl = link->link_hdl; s_ctrl->bridge_intf.crm_cb = link->crm_cb; @@ -925,6 +908,7 @@ int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link) s_ctrl->bridge_intf.link_hdl = -1; s_ctrl->bridge_intf.crm_cb = NULL; } + mutex_unlock(&s_ctrl->cam_sensor_mutex); return 0; } @@ -1005,7 +989,7 @@ int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl) CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info); return -EINVAL; } - rc = msm_camera_power_down(power_info, soc_info); + rc = cam_sensor_util_power_down(power_info, soc_info); if (rc < 0) { CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc); return rc; @@ -1155,8 +1139,10 @@ int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply) } CAM_DBG(CAM_REQ, " Sensor update req id: %lld", apply->request_id); trace_cam_apply_req("Sensor", apply->request_id); + mutex_lock(&(s_ctrl->cam_sensor_mutex)); rc = cam_sensor_apply_settings(s_ctrl, apply->request_id, CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); return rc; } @@ -1190,7 +1176,9 @@ int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req) continue; if (i2c_set->is_settings_valid == 1) { + mutex_lock(&(s_ctrl->cam_sensor_mutex)); rc = delete_request(i2c_set); + mutex_unlock(&(s_ctrl->cam_sensor_mutex)); if (rc < 0) CAM_ERR(CAM_SENSOR, "delete request: %lld rc: %d", diff --git 
a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c index 9450785c9c530a960630627fb2e45dcead622fb3..7a5481d00351af10a1eb3062976c7c276e10873f 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c @@ -439,6 +439,75 @@ int cam_sensor_i2c_command_parser( return rc; } +int cam_sensor_util_i2c_apply_setting( + struct camera_io_master *io_master_info, + struct i2c_settings_list *i2c_list) +{ + int32_t rc = 0; + uint32_t i, size; + + switch (i2c_list->op_code) { + case CAM_SENSOR_I2C_WRITE_RANDOM: { + rc = camera_io_dev_write(io_master_info, + &(i2c_list->i2c_settings)); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to random write I2C settings: %d", + rc); + return rc; + } + break; + } + case CAM_SENSOR_I2C_WRITE_SEQ: { + rc = camera_io_dev_write_continuous( + io_master_info, &(i2c_list->i2c_settings), 0); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to seq write I2C settings: %d", + rc); + return rc; + } + break; + } + case CAM_SENSOR_I2C_WRITE_BURST: { + rc = camera_io_dev_write_continuous( + io_master_info, &(i2c_list->i2c_settings), 1); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "Failed to burst write I2C settings: %d", + rc); + return rc; + } + break; + } + case CAM_SENSOR_I2C_POLL: { + size = i2c_list->i2c_settings.size; + for (i = 0; i < size; i++) { + rc = camera_io_dev_poll( + io_master_info, + i2c_list->i2c_settings.reg_setting[i].reg_addr, + i2c_list->i2c_settings.reg_setting[i].reg_data, + i2c_list->i2c_settings.reg_setting[i].data_mask, + i2c_list->i2c_settings.addr_type, + i2c_list->i2c_settings.data_type, + i2c_list->i2c_settings.reg_setting[i].delay); + if (rc < 0) { + CAM_ERR(CAM_SENSOR, + "i2c poll apply setting Fail: %d", rc); + return rc; + } + } + break; + } + default: + 
CAM_ERR(CAM_SENSOR, "Wrong Opcode: %d", i2c_list->op_code); + rc = -EINVAL; + break; + } + + return rc; +} + int32_t msm_camera_fill_vreg_params( struct cam_hw_soc_info *soc_info, struct cam_sensor_power_setting *power_setting, @@ -1711,7 +1780,7 @@ msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl, return ps; } -int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl, +int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl, struct cam_hw_soc_info *soc_info) { int index = 0, ret = 0, num_vreg = 0, i; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h index 6c0287e48487d033264d8082ac3cc113c1d59556..583ddb14243b0961940bc91c12ff137a919c5c5d 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h @@ -39,6 +39,9 @@ int cam_sensor_i2c_command_parser(struct camera_io_master *io_master, struct i2c_settings_array *i2c_reg_settings, struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers); +int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info, + struct i2c_settings_list *i2c_list); + int32_t delete_request(struct i2c_settings_array *i2c_array); int cam_sensor_util_request_gpio_table( struct cam_hw_soc_info *soc_info, int gpio_en); @@ -49,7 +52,7 @@ int cam_sensor_util_init_gpio_pin_tbl( int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl, struct cam_hw_soc_info *soc_info); -int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl, +int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl, struct cam_hw_soc_info *soc_info); int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info, diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile index 
e17dac6c0d9da395603ccc23b49b5e7d7a723c16..96f39680fd0c33f69ba06b2c56ee5ee3d7873345 100644 --- a/drivers/media/platform/msm/camera/cam_smmu/Makefile +++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile @@ -1,3 +1,4 @@ ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils +ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c index 75a841222718c17224872780dd7c68424d63d386..242b1aafdc03987eed74536717ce6497acb2c66a 100644 --- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c +++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "cam_smmu_api.h" #include "cam_debug_util.h" @@ -34,11 +35,14 @@ #define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE) #define COOKIE_MASK ((1<> COOKIE_SIZE) & COOKIE_MASK) +static int g_num_pf_handled = 4; +module_param(g_num_pf_handled, int, 0644); + struct firmware_alloc_info { struct device *fw_dev; void *fw_kva; @@ -129,12 +133,11 @@ struct cam_context_bank_info { int handle; enum cam_smmu_ops_param state; - void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *, - struct device *, unsigned long, - int, void*); + cam_smmu_client_page_fault_handler handler[CAM_SMMU_CB_MAX]; void *token[CAM_SMMU_CB_MAX]; int cb_count; int secure_count; + int pf_count; }; struct cam_iommu_cb_set { @@ -250,13 +253,14 @@ static void cam_smmu_print_table(void); static int cam_smmu_probe(struct platform_device *pdev); -static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr); +static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr); static void cam_smmu_page_fault_work(struct work_struct *work) { int j; int idx; struct cam_smmu_work_payload *payload; + uint32_t buf_info; mutex_lock(&iommu_cb_set.payload_list_lock); if (list_empty(&iommu_cb_set.payload_list)) { @@ -273,8 +277,11 
@@ static void cam_smmu_page_fault_work(struct work_struct *work) /* Dereference the payload to call the handler */ idx = payload->idx; - mutex_lock(&iommu_cb_set.cb_info[idx].lock); - cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova); + buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova); + if (buf_info != 0) { + CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx); + } + for (j = 0; j < CAM_SMMU_CB_MAX; j++) { if ((iommu_cb_set.cb_info[idx].handler[j])) { iommu_cb_set.cb_info[idx].handler[j]( @@ -282,10 +289,10 @@ static void cam_smmu_page_fault_work(struct work_struct *work) payload->dev, payload->iova, payload->flags, - iommu_cb_set.cb_info[idx].token[j]); + iommu_cb_set.cb_info[idx].token[j], + buf_info); } } - mutex_unlock(&iommu_cb_set.cb_info[idx].lock); kfree(payload); } @@ -331,10 +338,13 @@ static void cam_smmu_print_table(void) } } -static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr) +static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr) { - struct cam_dma_buff_info *mapping; + struct cam_dma_buff_info *mapping, *closest_mapping = NULL; unsigned long start_addr, end_addr, current_addr; + uint32_t buf_handle = 0; + + long delta = 0, lowest_delta = 0; current_addr = (unsigned long)vaddr; list_for_each_entry(mapping, @@ -342,31 +352,51 @@ static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr) start_addr = (unsigned long)mapping->paddr; end_addr = (unsigned long)mapping->paddr + mapping->len; - if (start_addr <= current_addr && current_addr < end_addr) { - CAM_ERR(CAM_SMMU, - "va %pK valid: range:%pK-%pK, fd = %d cb: %s", - vaddr, (void *)start_addr, (void *)end_addr, - mapping->ion_fd, + if (start_addr <= current_addr && current_addr <= end_addr) { + closest_mapping = mapping; + CAM_INFO(CAM_SMMU, + "Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s", + current_addr, start_addr, + end_addr, mapping->ion_fd, iommu_cb_set.cb_info[idx].name); goto end; } else { + if (start_addr > current_addr) + 
delta = start_addr - current_addr; + else + delta = current_addr - end_addr - 1; + + if (delta < lowest_delta || lowest_delta == 0) { + lowest_delta = delta; + closest_mapping = mapping; + } CAM_DBG(CAM_SMMU, - "va %pK is not in this range: %pK-%pK, fd = %d", - vaddr, (void *)start_addr, (void *)end_addr, - mapping->ion_fd); + "approx va %lx not in range: %lx-%lx fd = %0x", + current_addr, start_addr, + end_addr, mapping->ion_fd); } } - CAM_ERR(CAM_SMMU, - "Cannot find vaddr:%pK in SMMU %s uses invalid virt address", - vaddr, iommu_cb_set.cb_info[idx].name); -end: - return; -} -void cam_smmu_reg_client_page_fault_handler(int handle, - void (*client_page_fault_handler)(struct iommu_domain *, - struct device *, unsigned long, - int, void*), void *token) +end: + if (closest_mapping) { + buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd); + CAM_INFO(CAM_SMMU, + "Closest map fd %d 0x%lx 0x%lx-0x%lx buf=%pK mem %0x", + closest_mapping->ion_fd, current_addr, + (unsigned long)closest_mapping->paddr, + (unsigned long)closest_mapping->paddr + mapping->len, + closest_mapping->buf, + buf_handle); + } else + CAM_INFO(CAM_SMMU, + "Cannot find vaddr:%lx in SMMU %s virt address", + current_addr, iommu_cb_set.cb_info[idx].name); + + return buf_handle; +} + +void cam_smmu_set_client_page_fault_handler(int handle, + cam_smmu_client_page_fault_handler handler_cb, void *token) { int idx, i = 0; @@ -392,7 +422,7 @@ void cam_smmu_reg_client_page_fault_handler(int handle, return; } - if (client_page_fault_handler) { + if (handler_cb) { if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) { CAM_ERR(CAM_SMMU, "%s Should not regiester more handlers", @@ -400,12 +430,14 @@ void cam_smmu_reg_client_page_fault_handler(int handle, mutex_unlock(&iommu_cb_set.cb_info[idx].lock); return; } + iommu_cb_set.cb_info[idx].cb_count++; + for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) { if (iommu_cb_set.cb_info[idx].token[i] == NULL) { iommu_cb_set.cb_info[idx].token[i] = token; 
iommu_cb_set.cb_info[idx].handler[i] = - client_page_fault_handler; + handler_cb; break; } } @@ -427,6 +459,47 @@ void cam_smmu_reg_client_page_fault_handler(int handle, mutex_unlock(&iommu_cb_set.cb_info[idx].lock); } +void cam_smmu_unset_client_page_fault_handler(int handle, void *token) +{ + int idx, i = 0; + + if (!token || (handle == HANDLE_INIT)) { + CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle"); + return; + } + + idx = GET_SMMU_TABLE_IDX(handle); + if (idx < 0 || idx >= iommu_cb_set.cb_num) { + CAM_ERR(CAM_SMMU, + "Error: handle or index invalid. idx = %d hdl = %x", + idx, handle); + return; + } + + mutex_lock(&iommu_cb_set.cb_info[idx].lock); + if (iommu_cb_set.cb_info[idx].handle != handle) { + CAM_ERR(CAM_SMMU, + "Error: hdl is not valid, table_hdl = %x, hdl = %x", + iommu_cb_set.cb_info[idx].handle, handle); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); + return; + } + + for (i = 0; i < CAM_SMMU_CB_MAX; i++) { + if (iommu_cb_set.cb_info[idx].token[i] == token) { + iommu_cb_set.cb_info[idx].token[i] = NULL; + iommu_cb_set.cb_info[idx].handler[i] = + NULL; + iommu_cb_set.cb_info[idx].cb_count--; + break; + } + } + if (i == CAM_SMMU_CB_MAX) + CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s", + handle, iommu_cb_set.cb_info[idx].name); + mutex_unlock(&iommu_cb_set.cb_info[idx].lock); +} + static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *token) @@ -457,6 +530,13 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, return -EINVAL; } + if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) { + CAM_INFO(CAM_SMMU, "PF already handled %d %d %d", + g_num_pf_handled, idx, + iommu_cb_set.cb_info[idx].pf_count); + return -EINVAL; + } + payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC); if (!payload) return -EINVAL; @@ -472,7 +552,7 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain, 
list_add_tail(&payload->list, &iommu_cb_set.payload_list); mutex_unlock(&iommu_cb_set.payload_list_lock); - schedule_work(&iommu_cb_set.smmu_work); + cam_smmu_page_fault_work(&iommu_cb_set.smmu_work); return -EINVAL; } @@ -526,6 +606,7 @@ void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops) iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH; iommu_cb_set.cb_info[i].dev = NULL; iommu_cb_set.cb_info[i].cb_count = 0; + iommu_cb_set.cb_info[i].pf_count = 0; for (j = 0; j < CAM_SMMU_CB_MAX; j++) { iommu_cb_set.cb_info[i].token[j] = NULL; iommu_cb_set.cb_info[i].handler[j] = NULL; @@ -1580,7 +1661,9 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf, if (rc < 0) { CAM_ERR(CAM_SMMU, - "IOVA alloc failed for shared memory"); + "IOVA alloc failed for shared memory, size=%zu, idx=%d, handle=%d", + *len_ptr, idx, + iommu_cb_set.cb_info[idx].handle); goto err_unmap_sg; } @@ -1596,7 +1679,9 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf, rc = -ENOMEM; goto err_unmap_sg; } else { - CAM_DBG(CAM_SMMU, "iommu_map_sg returned %zu", size); + CAM_DBG(CAM_SMMU, + "iommu_map_sg returned iova=%pK, size=%zu", + iova, size); *paddr_ptr = iova; *len_ptr = size; } @@ -1618,8 +1703,8 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf, goto err_unmap_sg; } - CAM_DBG(CAM_SMMU, "region_id=%d, paddr=%pK, len=%d", - region_id, *paddr_ptr, *len_ptr); + CAM_DBG(CAM_SMMU, "iova=%pK, region_id=%d, paddr=%pK, len=%d", + iova, region_id, *paddr_ptr, *len_ptr); if (table->sgl) { CAM_DBG(CAM_SMMU, @@ -1655,11 +1740,12 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf, if (!*paddr_ptr || !*len_ptr) { CAM_ERR(CAM_SMMU, "Error: Space Allocation failed"); kfree(*mapping_info); + *mapping_info = NULL; rc = -ENOSPC; goto err_alloc; } - CAM_DBG(CAM_SMMU, "dma_buf = %pK, dev = %pK, paddr= %pK, len = %u", - buf, (void *)iommu_cb_set.cb_info[idx].dev, + CAM_DBG(CAM_SMMU, "idx=%d, dma_buf=%pK, dev=%pK, paddr=%pK, len=%u", + idx, buf, (void 
*)iommu_cb_set.cb_info[idx].dev, (void *)*paddr_ptr, (unsigned int)*len_ptr); return 0; @@ -2253,7 +2339,9 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, dmabuf = dma_buf_get(ion_fd); if (IS_ERR_OR_NULL((void *)(dmabuf))) { - CAM_ERR(CAM_SMMU, "Error: dma buf get failed"); + CAM_ERR(CAM_SMMU, + "Error: dma buf get failed, idx=%d, ion_fd=%d", + idx, ion_fd); rc = PTR_ERR(dmabuf); goto err_out; } @@ -2265,7 +2353,9 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, */ attach = dma_buf_attach(dmabuf, iommu_cb_set.cb_info[idx].dev); if (IS_ERR_OR_NULL(attach)) { - CAM_ERR(CAM_SMMU, "Error: dma buf attach failed"); + CAM_ERR(CAM_SMMU, + "Error: dma buf attach failed, idx=%d, ion_fd=%d", + idx, ion_fd); rc = PTR_ERR(attach); goto err_put; } @@ -2297,8 +2387,8 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, mapping_info->ref_count = 1; mapping_info->buf = dmabuf; - CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u", - ion_fd, + CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, dev=%pK, paddr=%pK, len=%u", + idx, ion_fd, (void *)iommu_cb_set.cb_info[idx].dev, (void *)*paddr_ptr, (unsigned int)*len_ptr); @@ -2354,15 +2444,16 @@ int cam_smmu_map_stage2_iova(int handle, if (!iommu_cb_set.cb_info[idx].is_secure) { CAM_ERR(CAM_SMMU, - "Error: can't map secure mem to non secure cb"); + "Error: can't map secure mem to non secure cb, idx=%d", + idx); return -EINVAL; } mutex_lock(&iommu_cb_set.cb_info[idx].lock); if (iommu_cb_set.cb_info[idx].handle != handle) { CAM_ERR(CAM_SMMU, - "Error: hdl is not valid, table_hdl = %x, hdl = %x", - iommu_cb_set.cb_info[idx].handle, handle); + "Error: hdl is not valid, idx=%d, table_hdl=%x, hdl=%x", + idx, iommu_cb_set.cb_info[idx].handle, handle); rc = -EINVAL; goto get_addr_end; } @@ -2370,15 +2461,18 @@ int cam_smmu_map_stage2_iova(int handle, buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr); if (buf_state == 
CAM_SMMU_BUFF_EXIST) { - CAM_DBG(CAM_SMMU, "fd:%d already in list, give same addr back", - ion_fd); + CAM_DBG(CAM_SMMU, + "fd:%d already in list idx:%d, handle=%d give same addr back", + ion_fd, idx, handle); rc = 0; goto get_addr_end; } rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir, paddr_ptr, len_ptr); if (rc < 0) { - CAM_ERR(CAM_SMMU, "Error: mapping or add list fail"); + CAM_ERR(CAM_SMMU, + "Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d", + idx, handle, ion_fd, rc); goto get_addr_end; } @@ -2519,14 +2613,16 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, mutex_lock(&iommu_cb_set.cb_info[idx].lock); if (iommu_cb_set.cb_info[idx].is_secure) { CAM_ERR(CAM_SMMU, - "Error: can't map non-secure mem to secure cb"); + "Error: can't map non-secure mem to secure cb idx=%d", + idx); rc = -EINVAL; goto get_addr_end; } if (iommu_cb_set.cb_info[idx].handle != handle) { - CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x", - iommu_cb_set.cb_info[idx].handle, handle); + CAM_ERR(CAM_SMMU, + "hdl is not valid, idx=%d, table_hdl = %x, hdl = %x", + idx, iommu_cb_set.cb_info[idx].handle, handle); rc = -EINVAL; goto get_addr_end; } @@ -2542,7 +2638,8 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr); if (buf_state == CAM_SMMU_BUFF_EXIST) { CAM_ERR(CAM_SMMU, - "ion_fd: %d already in the list", ion_fd); + "fd:%d already in list idx:%d, handle=%d, give same addr back", + ion_fd, idx, handle); rc = -EALREADY; goto get_addr_end; } @@ -2550,7 +2647,9 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir, paddr_ptr, len_ptr, region_id); if (rc < 0) - CAM_ERR(CAM_SMMU, "mapping or add list fail"); + CAM_ERR(CAM_SMMU, + "mapping or add list fail, idx=%d, fd=%d, region=%d, rc=%d", + idx, ion_fd, region_id, rc); get_addr_end: mutex_unlock(&iommu_cb_set.cb_info[idx].lock); diff --git 
a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h index 978fa47b98f3c313bec9ee0e6f075e91dbad7bd2..85259550825563b84e932c924340aa15c819f60b 100644 --- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h +++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h @@ -50,6 +50,21 @@ enum cam_smmu_region_id { CAM_SMMU_REGION_QDSS }; +/** + * @brief : Callback function type that gets called back on cam + * smmu page fault. + * + * @param domain : Iommu domain received in iommu page fault handler + * @param dev : Device received in iommu page fault handler + * @param iova : IOVA where page fault occurred + * @param flags : Flags received in iommu page fault handler + * @param token : Userdata given during callback registration + * @param buf_info : Closest mapped buffer info + */ +typedef void (*cam_smmu_client_page_fault_handler)(struct iommu_domain *domain, + struct device *dev, unsigned long iova, int flags, void *token, + uint32_t buf_info); + /** * @brief : Structure to store region information * @@ -215,13 +230,19 @@ int cam_smmu_find_index_by_handle(int hdl); * @brief : Registers smmu fault handler for client * * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) - * @param client_page_fault_handler: It is triggered in IOMMU page fault + * @param handler_cb: It is triggered in IOMMU page fault + * @param token: It is input param when trigger page fault handler + */ +void cam_smmu_set_client_page_fault_handler(int handle, + cam_smmu_client_page_fault_handler handler_cb, void *token); + +/** + * @brief : Unregisters smmu fault handler for client + * + * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.) 
* @param token: It is input param when trigger page fault handler */ -void cam_smmu_reg_client_page_fault_handler(int handle, - void (*client_page_fault_handler)(struct iommu_domain *, - struct device *, unsigned long, - int, void*), void *token); +void cam_smmu_unset_client_page_fault_handler(int handle, void *token); /** * @brief Maps memory from an ION fd into IOVA space diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c index a82804aaa2c48feecf6e66486fc1862a86f29b9f..6eebe4a9f645ce93bde93022eaeaafb68190d917 100644 --- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c @@ -298,7 +298,7 @@ static int __init mpq_dmx_sw_plugin_init(void) rc = platform_driver_register(&mpq_dmx_sw_plugin_driver); if (rc) MPQ_DVB_ERR_PRINT( - "%s: mpq_dmx_sw_plugin: platform_driver_register failed: %d\n" + "%s: platform_driver_register failed: %d\n", __func__, rc); return rc; diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index 83ed68654dec6149686f1b0fcff921a69f471775..fd142099c3bb1af763f8f416c96a057ba61a2f51 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -28,6 +28,7 @@ #include #include #include +#include #include "npu_mgr.h" @@ -43,10 +44,11 @@ #define ROW_BYTES 16 #define GROUP_BYTES 4 -#define NUM_TOTAL_CLKS 20 +#define NUM_MAX_CLK_NUM 24 #define NPU_MAX_REGULATOR_NUM 2 #define NPU_MAX_DT_NAME_LEN 21 -#define NPU_MAX_PWRLEVELS 7 +#define NPU_MAX_PWRLEVELS 8 +#define NPU_MAX_STATS_BUF_SIZE 16384 /* ------------------------------------------------------------------------- * Data Structures @@ -108,7 +110,7 @@ struct npu_mbox { * @freq[]: NPU frequency vote in Hz */ struct npu_pwrlevel { - long clk_freq[NUM_TOTAL_CLKS]; + long clk_freq[NUM_MAX_CLK_NUM]; }; /* @@ -184,7 +186,7 @@ struct npu_device { uint32_t npu_phys; 
uint32_t core_clk_num; - struct npu_clk core_clks[NUM_TOTAL_CLKS]; + struct npu_clk core_clks[NUM_MAX_CLK_NUM]; uint32_t regulator_num; struct npu_regulator regulators[NPU_MAX_DT_NAME_LEN]; @@ -197,7 +199,7 @@ struct npu_device { struct npu_smmu_ctx smmu_ctx; struct npu_debugfs_ctx debugfs_ctx; - struct npu_mbox mbox[NPU_MAX_MBOX_NUM]; + struct npu_mbox mbox_aop; struct thermal_cooling_device *tcdev; struct npu_pwrctrl pwrctrl; diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c index 89d71f6a3f2148bd0190fe7b47ca93aea4b73be6..31a94d8a66495045a81bf15f7c72b5faf6ef2ff8 100644 --- a/drivers/media/platform/msm/npu/npu_debugfs.c +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -380,8 +380,12 @@ static ssize_t npu_debug_ctrl_write(struct file *file, if (npu_enable_core_power(npu_dev)) return -EPERM; + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(1), 2); REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2); npu_disable_core_power(npu_dev); + } else if (strcmp(buf, "ssr_wdt") == 0) { + pr_info("trigger wdt irq\n"); + npu_disable_post_pil_clocks(npu_dev); } else if (strcmp(buf, "loopback") == 0) { pr_debug("loopback test\n"); rc = npu_host_loopback_test(npu_dev); diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index 43891c7fe938993b91c7100f75748373a04c6e47..601a3213b99e233a8ecf754e138127b90256da0f 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -40,10 +40,7 @@ #define DDR_MAPPED_SIZE 0x60000000 #define PERF_MODE_DEFAULT 0 - -#define POWER_LEVEL_MIN_SVS 0 -#define POWER_LEVEL_LOW_SVS 1 -#define POWER_LEVEL_NOMINAL 4 +#define MBOX_OP_TIMEOUTMS 1000 /* ------------------------------------------------------------------------- * File Scope Prototypes @@ -312,6 +309,7 @@ int npu_enable_core_power(struct npu_device *npu_dev) void npu_disable_core_power(struct npu_device *npu_dev) { struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; + 
struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl; if (!pwr->pwr_vote_num) return; @@ -320,12 +318,11 @@ void npu_disable_core_power(struct npu_device *npu_dev) npu_suspend_devbw(npu_dev); npu_disable_core_clocks(npu_dev); npu_disable_regulators(npu_dev); + pwr->active_pwrlevel = thermalctrl->pwr_level; + pwr->uc_pwrlevel = pwr->max_pwrlevel; + pr_debug("setting back to power level=%d\n", + pwr->active_pwrlevel); } - /* init the power levels back to default */ - pwr->active_pwrlevel = pwr->default_pwrlevel; - pwr->uc_pwrlevel = pwr->default_pwrlevel; - pr_debug("setting back to default power level=%d\n", - pwr->default_pwrlevel); } static int npu_enable_core_clocks(struct npu_device *npu_dev) @@ -364,11 +361,6 @@ static uint32_t npu_calc_power_level(struct npu_device *npu_dev) else ret_level = therm_pwr_level; - /* adjust the power level */ - /* force to lowsvs, minsvs not supported */ - if (ret_level == POWER_LEVEL_MIN_SVS) - ret_level = POWER_LEVEL_LOW_SVS; - pr_debug("%s therm=%d active=%d uc=%d set level=%d\n", __func__, therm_pwr_level, active_pwr_level, uc_pwr_level, ret_level); @@ -382,20 +374,22 @@ static int npu_set_power_level(struct npu_device *npu_dev) int i, ret = 0; uint32_t pwr_level_to_set; - if (!pwr->pwr_vote_num) { - pr_err("power is not enabled during set request\n"); - return -EINVAL; - } - /* get power level to set */ pwr_level_to_set = npu_calc_power_level(npu_dev); + if (!pwr->pwr_vote_num) { + pr_debug("power is not enabled during set request\n"); + pwr->active_pwrlevel = pwr_level_to_set; + return 0; + } + /* if the same as current, dont do anything */ - if (pwr_level_to_set == pwr->active_pwrlevel) + if (pwr_level_to_set == pwr->active_pwrlevel) { + pr_debug("power level %d doesn't change\n", pwr_level_to_set); return 0; + } pr_debug("setting power level to [%d]\n", pwr_level_to_set); - pwr->active_pwrlevel = pwr_level_to_set; pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr->active_pwrlevel]; @@ -432,10 +426,13 @@ int 
npu_set_uc_power_level(struct npu_device *npu_dev, struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; if (perf_mode == PERF_MODE_DEFAULT) - pwr->uc_pwrlevel = POWER_LEVEL_NOMINAL; + pwr->uc_pwrlevel = pwr->default_pwrlevel; else pwr->uc_pwrlevel = perf_mode - 1; + if (pwr->uc_pwrlevel > pwr->max_pwrlevel) + pwr->uc_pwrlevel = pwr->max_pwrlevel; + return npu_set_power_level(npu_dev); } @@ -472,7 +469,7 @@ static void npu_suspend_devbw(struct npu_device *npu_dev) struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; int ret; - if (pwr->bwmon_enabled) { + if (pwr->bwmon_enabled && pwr->devbw) { pwr->bwmon_enabled = 0; ret = devfreq_suspend_devbw(pwr->devbw); if (ret) @@ -487,7 +484,7 @@ static void npu_resume_devbw(struct npu_device *npu_dev) struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; int ret; - if (!pwr->bwmon_enabled) { + if (!pwr->bwmon_enabled && pwr->devbw) { pwr->bwmon_enabled = 1; npu_restore_bw_registers(npu_dev); ret = devfreq_resume_devbw(pwr->devbw); @@ -537,7 +534,10 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil) struct npu_pwrlevel *pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr->active_pwrlevel]; - for (i = 0; i < npu_dev->core_clk_num; i++) { + for (i = 0; i < ARRAY_SIZE(npu_clock_order); i++) { + if (!core_clks[i].clk) + continue; + if (post_pil) { if (!npu_is_post_clock(core_clks[i].clk_name)) continue; @@ -574,6 +574,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil) if (rc) { for (i--; i >= 0; i--) { + if (!core_clks[i].clk) + continue; + if (post_pil) { if (!npu_is_post_clock(core_clks[i].clk_name)) continue; @@ -594,7 +597,10 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil) int i = 0; struct npu_clk *core_clks = npu_dev->core_clks; - for (i = (npu_dev->core_clk_num)-1; i >= 0 ; i--) { + for (i = ARRAY_SIZE(npu_clock_order) - 1; i >= 0 ; i--) { + if (!core_clks[i].clk) + continue; + if (post_pil) { if (!npu_is_post_clock(core_clks[i].clk_name)) continue; @@ -712,6 +718,8 @@ int 
npu_enable_irq(struct npu_device *npu_dev) REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), NPU_ERROR_IRQ_MASK); REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_OWNER(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_WDOG_IRQ_OWNER(0), NPU_WDOG_IRQ_MASK); for (i = 0; i < NPU_MAX_IRQ; i++) { if (npu_dev->irq[i].irq != 0) { @@ -753,9 +761,10 @@ int npu_enable_sys_cache(struct npu_device *npu_dev) npu_dev->sys_cache = llcc_slice_getd(&(npu_dev->pdev->dev), "npu"); if (IS_ERR_OR_NULL(npu_dev->sys_cache)) { - pr_debug("unable to init sys cache\n"); + pr_warn("unable to init sys cache\n"); npu_dev->sys_cache = NULL; - return -ENODEV; + npu_dev->host_ctx.sys_cache_disable = true; + return 0; } /* set npu side regs - program SCID */ @@ -1108,9 +1117,9 @@ static int npu_exec_network_v2(struct npu_client *client, return -EINVAL; } - if (req.stats_buf_size > MSM_NPU_MAX_STATS_BUF_SIZE) { + if (req.stats_buf_size > NPU_MAX_STATS_BUF_SIZE) { pr_err("Invalid stats buffer size %d max %d\n", - req.stats_buf_size, MSM_NPU_MAX_STATS_BUF_SIZE); + req.stats_buf_size, NPU_MAX_STATS_BUF_SIZE); return -EINVAL; } @@ -1276,22 +1285,16 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev) rc = -EINVAL; goto clk_err; } - if (num_clk != NUM_TOTAL_CLKS) { - pr_err("number of clocks is invalid [%d] should be [%d]\n", - num_clk, NUM_TOTAL_CLKS); - rc = -EINVAL; - goto clk_err; - } npu_dev->core_clk_num = num_clk; for (i = 0; i < num_clk; i++) { of_property_read_string_index(pdev->dev.of_node, "clock-names", i, &clock_name); - for (j = 0; j < num_clk; j++) { + for (j = 0; j < ARRAY_SIZE(npu_clock_order); j++) { if (!strcmp(npu_clock_order[j], clock_name)) break; } - if (j == num_clk) { + if (j == ARRAY_SIZE(npu_clock_order)) { pr_err("clock is not in ordered list\n"); rc = -EINVAL; goto clk_err; @@ -1366,7 +1369,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, 
uint32_t i = 0; uint32_t j = 0; uint32_t index; - uint32_t clk_array_values[NUM_TOTAL_CLKS]; + uint32_t clk_array_values[NUM_MAX_CLK_NUM]; uint32_t clk_rate; struct npu_pwrlevel *level; @@ -1398,13 +1401,13 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, if (npu_is_exclude_rate_clock(clock_name)) continue; - for (j = 0; j < npu_dev->core_clk_num; j++) { + for (j = 0; j < ARRAY_SIZE(npu_clock_order); j++) { if (!strcmp(npu_clock_order[j], clock_name)) break; } - if (j == npu_dev->core_clk_num) { + if (j == ARRAY_SIZE(npu_clock_order)) { pr_err("pwrlevel clock is not in ordered list\n"); return -EINVAL; } @@ -1468,9 +1471,8 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev) return ret; } } else { - pr_err("bwdev is not defined in dts\n"); + pr_warn("bwdev is not defined in dts\n"); pwr->devbw = NULL; - ret = -EINVAL; } return ret; @@ -1523,6 +1525,29 @@ static int npu_irq_init(struct npu_device *npu_dev) return ret; } +static int npu_mbox_init(struct npu_device *npu_dev) +{ + struct platform_device *pdev = npu_dev->pdev; + struct npu_mbox *mbox_aop = &npu_dev->mbox_aop; + int ret = 0; + + if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) { + mbox_aop->client.dev = &pdev->dev; + mbox_aop->client.tx_block = true; + mbox_aop->client.tx_tout = MBOX_OP_TIMEOUTMS; + mbox_aop->client.knows_txdone = false; + + mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0); + if (IS_ERR(mbox_aop->chan)) { + ret = PTR_ERR(mbox_aop->chan); + pr_err("mailbox channel request failed, ret=%d\n", ret); + mbox_aop->chan = NULL; + } + } + + return ret; +} + /* ------------------------------------------------------------------------- * Probe/Remove * ------------------------------------------------------------------------- @@ -1571,6 +1596,10 @@ static int npu_probe(struct platform_device *pdev) if (rc) goto error_get_dev_num; + rc = npu_mbox_init(npu_dev); + if (rc) + goto error_get_dev_num; + npu_dev->npu_base = devm_ioremap(&pdev->dev, res->start, 
npu_dev->reg_size); if (unlikely(!npu_dev->npu_base)) { @@ -1679,6 +1708,8 @@ static int npu_probe(struct platform_device *pdev) class_destroy(npu_dev->class); error_class_create: unregister_chrdev_region(npu_dev->dev_num, 1); + if (npu_dev->mbox_aop.chan) + mbox_free_channel(npu_dev->mbox_aop.chan); error_get_dev_num: return rc; } @@ -1699,6 +1730,9 @@ static int npu_remove(struct platform_device *pdev) class_destroy(npu_dev->class); unregister_chrdev_region(npu_dev->dev_num, 1); platform_set_drvdata(pdev, NULL); + if (npu_dev->mbox_aop.chan) + mbox_free_channel(npu_dev->mbox_aop.chan); + return 0; } diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h index 3d63213bf13e7f06e1e4ba88b0c27524b8065ac5..870d8e206a607e39aeea1c007d884664612d2713 100644 --- a/drivers/media/platform/msm/npu/npu_firmware.h +++ b/drivers/media/platform/msm/npu/npu_firmware.h @@ -78,6 +78,8 @@ #define HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT 2 /* Host requests to pause fw during boot up */ #define HOST_CTRL_STATUS_FW_PAUSE 3 +/* Host requests to disable watchdog */ +#define HOST_CTRL_STATUS_DISABLE_WDOG_BIT 4 /* 32 bit values of the bit fields above */ #define HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL \ @@ -88,6 +90,8 @@ (1 << HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT) #define HOST_CTRL_STATUS_FW_PAUSE_VAL \ (1 << HOST_CTRL_STATUS_FW_PAUSE) +#define HOST_CTRL_STATUS_DISABLE_WDOG_VAL \ + (1 << HOST_CTRL_STATUS_DISABLE_WDOG_BIT) /* NPU HOST DSP Control/Status Register */ diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h index 1c61849a6cb89965c93748b93b631860d961ddd1..c941c120a03218f415c215ad91dab556552e5ba9 100644 --- a/drivers/media/platform/msm/npu/npu_hw.h +++ b/drivers/media/platform/msm/npu/npu_hw.h @@ -31,11 +31,14 @@ #define NPU_MASTERn_ERROR_IRQ_ENABLE(n) (0x00101018+0x1000*(n)) #define NPU_MASTERn_ERROR_IRQ_CLEAR(n) (0x0010101C+0x1000*(n)) #define NPU_MASTERn_ERROR_IRQ_SET(n) 
(0x00101020+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_OWNER(n) (0x00107000+4*(n)) #define NPU_ERROR_IRQ_MASK 0x000000E3 #define NPU_MASTERn_WDOG_IRQ_STATUS(n) (0x00101030+0x1000*(n)) #define NPU_WDOG_BITE_IRQ_STATUS (1 << 1) #define NPU_MASTERn_WDOG_IRQ_INCLUDE(n) (0x00101034+0x1000*(n)) #define NPU_WDOG_BITE_IRQ_INCLUDE (1 << 1) +#define NPU_MASTERn_WDOG_IRQ_OWNER(n) (0x00107010+4*(n)) +#define NPU_WDOG_IRQ_MASK 0x00000002 #define NPU_GPR1 (0x00100104) diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 55a1315554fcd277d24746a80f664d9e7c486fbf..1d3bc832d20a6d01be62d9a7779f2b83a03bfac5 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -61,6 +61,7 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx, void *cmd_ptr); static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt); static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up); +static int npu_notify_aop(struct npu_device *npu_dev, bool on); /* ------------------------------------------------------------------------- * Function Definitions - Init / Deinit @@ -68,7 +69,7 @@ static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up); */ int fw_init(struct npu_device *npu_dev) { - uint32_t reg_val = 0; + uint32_t reg_val; struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; int ret = 0; mutex_lock(&host_ctx->lock); @@ -78,6 +79,8 @@ int fw_init(struct npu_device *npu_dev) return 0; } + npu_notify_aop(npu_dev, true); + if (npu_enable_core_power(npu_dev)) { ret = -EPERM; goto enable_pw_fail; @@ -100,13 +103,16 @@ int fw_init(struct npu_device *npu_dev) REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0); REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0); REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0); - if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE) { - pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode); - REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, - 
HOST_CTRL_STATUS_FW_PAUSE_VAL); - } else { - REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, 0x0); - } + + pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode); + reg_val = 0; + if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE) + reg_val |= HOST_CTRL_STATUS_FW_PAUSE_VAL; + + if (host_ctx->fw_dbg_mode & FW_DBG_DISABLE_WDOG) + reg_val |= HOST_CTRL_STATUS_DISABLE_WDOG_VAL; + + REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val); /* Read back to flush all registers for fw to read */ REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS); @@ -150,6 +156,7 @@ int fw_init(struct npu_device *npu_dev) host_ctx->fw_state = FW_ENABLED; host_ctx->fw_error = false; host_ctx->fw_ref_cnt++; + reinit_completion(&host_ctx->fw_deinit_done); mutex_unlock(&host_ctx->lock); pr_debug("firmware init complete\n"); @@ -230,12 +237,21 @@ void fw_deinit(struct npu_device *npu_dev, bool ssr) } } - npu_notify_dsp(npu_dev, false); - npu_disable_post_pil_clocks(npu_dev); npu_disable_sys_cache(npu_dev); subsystem_put_local(host_ctx->subsystem_handle); host_ctx->fw_state = FW_DISABLED; + + /* + * if it's not in ssr mode, notify dsp before power off + * otherwise delay 500 ms to make sure dsp has finished + * its own ssr handling. 
+ */ + if (!ssr) + npu_notify_dsp(npu_dev, false); + else + msleep(500); + npu_disable_core_power(npu_dev); if (ssr) { @@ -247,8 +263,11 @@ void fw_deinit(struct npu_device *npu_dev, bool ssr) } } + complete(&host_ctx->fw_deinit_done); mutex_unlock(&host_ctx->lock); pr_debug("firmware deinit complete\n"); + npu_notify_aop(npu_dev, false); + return; } @@ -259,6 +278,7 @@ int npu_host_init(struct npu_device *npu_dev) memset(host_ctx, 0, sizeof(*host_ctx)); init_completion(&host_ctx->loopback_done); + init_completion(&host_ctx->fw_deinit_done); mutex_init(&host_ctx->lock); atomic_set(&host_ctx->ipc_trans_id, 1); @@ -432,6 +452,38 @@ static int npu_notify_dsp(struct npu_device *npu_dev, bool pwr_up) return ret; } +#define MAX_LEN 128 + +static int npu_notify_aop(struct npu_device *npu_dev, bool on) +{ + char buf[MAX_LEN]; + struct qmp_pkt pkt; + int buf_size, rc = 0; + + if (!npu_dev->mbox_aop.chan) { + pr_warn("aop mailbox channel is not available\n"); + return 0; + } + + buf_size = snprintf(buf, MAX_LEN, "{class: bcm, res: npu_on, val: %d}", + on ? 
1 : 0); + if (buf_size < 0) { + pr_err("prepare qmp notify buf failed\n"); + return -EINVAL; + } + + pr_debug("send msg %s to aop\n", buf); + memset(&pkt, 0, sizeof(pkt)); + pkt.size = (buf_size + 3) & ~0x3; + pkt.data = buf; + + rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt); + if (rc < 0) + pr_err("qmp message send failed, ret=%d\n", rc); + + return rc; +} + /* ------------------------------------------------------------------------- * Function Definitions - Network Management * ------------------------------------------------------------------------- @@ -463,7 +515,7 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx, network->fw_error = false; network->cmd_pending = false; network->client = client; - network->stats_buf = kzalloc(MSM_NPU_MAX_STATS_BUF_SIZE, + network->stats_buf = kzalloc(NPU_MAX_STATS_BUF_SIZE, GFP_KERNEL); if (!network->stats_buf) { free_network(ctx, network->id); @@ -780,6 +832,18 @@ int32_t npu_host_map_buf(struct npu_client *client, int32_t npu_host_unmap_buf(struct npu_client *client, struct msm_npu_unmap_buf_ioctl *unmap_ioctl) { + struct npu_device *npu_dev = client->npu_dev; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + + /* + * Once SSR occurs, all buffers only can be unmapped until + * fw is disabled + */ + if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) && + !wait_for_completion_interruptible_timeout( + &host_ctx->fw_deinit_done, NW_CMD_TIMEOUT)) + pr_warn("npu: wait for fw_deinit_done time out\n"); + npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl, unmap_ioctl->npu_phys_addr); return 0; diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index 20116957e8bf6775d809f1a4ea7fd6c96a63a72a..bf498cbe4f75711f7a9420762188bf7990886c71 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -35,6 +35,7 @@ #define FW_DBG_MODE_PAUSE (1 << 0) #define FW_DBG_MODE_INC_TIMEOUT (1 << 1) +#define FW_DBG_DISABLE_WDOG 
(1 << 2) /* ------------------------------------------------------------------------- * Data Structures @@ -76,6 +77,7 @@ struct npu_host_ctx { struct work_struct irq_work; struct workqueue_struct *wq; struct completion loopback_done; + struct completion fw_deinit_done; int32_t network_num; struct npu_network networks[MAX_LOADED_NETWORK]; bool sys_cache_disable; diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index 3f63c7cbfcd4759de924b7e49f6d6fd338625216..eafcf1ea8595cac0ec82433c8f5de9e436a5053a 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -367,7 +367,7 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, fp_t bins_to_bit_factor, vsp_read_factor, vsp_write_factor, dpb_factor, dpb_write_factor, - y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp, + y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp, y_bw_10bpp_p010, motion_vector_complexity = 0; fp_t dpb_total = 0; @@ -444,7 +444,7 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * DIV_ROUND_UP(height, lcu_size); - bitrate = __lut(width, height, fps)->bitrate; + bitrate = (d->bitrate + 1000000 - 1) / 1000000; bins_to_bit_factor = d->work_mode == VIDC_WORK_MODE_1 ? FP_INT(0) : FP_INT(4); @@ -475,6 +475,7 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, FP_INT(1000 * 1000)); y_bw_no_ubwc_10bpp = fp_div(fp_mult(y_bw_no_ubwc_8bpp, FP_INT(256)), FP_INT(192)); + y_bw_10bpp_p010 = y_bw_no_ubwc_8bpp * 2; ddr.dpb_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; ddr.dpb_read = fp_div(fp_mult(ddr.dpb_read, @@ -496,7 +497,8 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, ddr.opb_read = FP_ZERO; ddr.opb_write = unified_dpb_opb ? FP_ZERO : (dpb_bpp == 8 ? 
- y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp); + y_bw_no_ubwc_8bpp : (opb_compression_enabled ? + y_bw_no_ubwc_10bpp : y_bw_10bpp_p010)); ddr.opb_write = fp_div(fp_mult(dpb_factor, ddr.opb_write), fp_mult(dpb_opb_scaling_ratio, opb_write_compression_factor)); @@ -618,8 +620,9 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, fp_t bins_to_bit_factor, dpb_compression_factor, original_compression_factor, original_compression_factor_y, - y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp, + y_bw_no_ubwc_8bpp, y_bw_no_ubwc_10bpp, y_bw_10bpp_p010, input_compression_factor, + downscaling_ratio, ref_y_read_bw_factor, ref_cbcr_read_bw_factor, recon_write_bw_factor, mese_read_factor, total_ref_read_crcb, @@ -642,7 +645,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, } llc = {0}; /* Encoder Parameters setup */ - rotation = false; + rotation = d->rotation; cropping_or_scaling = false; vertical_tile_width = 960; recon_write_bw_factor = FP(1, 8, 100); @@ -652,9 +655,12 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, /* Derived Parameters */ fps = d->fps; - width = max(d->input_width, BASELINE_DIMENSIONS.width); - height = max(d->input_height, BASELINE_DIMENSIONS.height); - bitrate = d->bitrate > 0 ? d->bitrate / 1000000 : + width = max(d->output_width, BASELINE_DIMENSIONS.width); + height = max(d->output_height, BASELINE_DIMENSIONS.height); + downscaling_ratio = fp_div(FP_INT(d->input_width * d->input_height), + FP_INT(d->output_width * d->output_height)); + downscaling_ratio = max(downscaling_ratio, FP_ONE); + bitrate = d->bitrate > 0 ? 
(d->bitrate + 1000000 - 1) / 1000000 : __lut(width, height, fps)->bitrate; lcu_size = d->lcu_size; lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * @@ -666,6 +672,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, FP_INT(1000 * 1000)); y_bw_no_ubwc_10bpp = fp_div(fp_mult(y_bw_no_ubwc_8bpp, FP_INT(256)), FP_INT(192)); + y_bw_10bpp_p010 = y_bw_no_ubwc_8bpp * 2; b_frames_enabled = d->b_frames_enabled; original_color_format = d->num_formats >= 1 ? @@ -772,9 +779,13 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, (recon_write_bw_factor - FP_ONE)), recon_write_bw_factor); - ddr.orig_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : y_bw_no_ubwc_10bpp; - ddr.orig_read = fp_div(fp_mult(ddr.orig_read, FP(1, 50, 100)), - original_compression_factor); + ddr.orig_read = dpb_bpp == 8 ? y_bw_no_ubwc_8bpp : + (original_compression_enabled ? y_bw_no_ubwc_10bpp : + y_bw_10bpp_p010); + ddr.orig_read = fp_div(fp_mult(fp_mult(ddr.orig_read, FP(1, 50, 100)), + downscaling_ratio), original_compression_factor); + if (rotation == 90 || rotation == 270) + ddr.orig_read *= lcu_size == 32 ? (dpb_bpp == 8 ? 
1 : 3) : 2; ddr.line_buffer_read = FP_INT(tnbr_per_lcu * lcu_per_frame * fps / bps(1)); @@ -814,6 +825,8 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, {"width", "%d", width}, {"height", "%d", height}, {"fps", "%d", fps}, + {"dpb bitdepth", "%d", dpb_bpp}, + {"input downscaling ratio", DUMP_FP_FMT, downscaling_ratio}, {"rotation", "%d", rotation}, {"cropping or scaling", "%d", cropping_or_scaling}, {"low power mode", "%d", low_power}, diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index a3387ed4d190f7235ac15daeef30fde41a530fc6..f61dbb56988144400f121a55e3824d06b65dbd79 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -577,6 +577,9 @@ static int get_hfi_extradata_index(enum hal_extradata_id index) case HAL_EXTRADATA_UBWC_CR_STATS_INFO: ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA; break; + case HAL_EXTRADATA_HDR10PLUS_METADATA: + ret = HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA; + break; default: dprintk(VIDC_WARN, "Extradata index not found: %d\n", index); break; diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index b480f05e3d093454f544d170c517268f6e3b9f6b..350b37f2a29becdef381a1b5453f0fceaad0ccda 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -437,6 +437,13 @@ static u32 get_frame_size(struct msm_vidc_inst *inst, frame_size = fmt->get_frame_size(plane, inst->capability.mbs_per_frame.max, MB_SIZE_IN_PIXEL); + if (inst->flags & VIDC_SECURE) { + dprintk(VIDC_DBG, + "Change secure input buffer size from %u to %u\n", + frame_size, frame_size / 2); + frame_size = frame_size / 2; + } + if (inst->buffer_size_limit && (inst->buffer_size_limit < frame_size)) { frame_size = inst->buffer_size_limit; diff --git a/drivers/media/platform/msm/vidc/msm_venc.c 
b/drivers/media/platform/msm/vidc/msm_venc.c index 6107712ed4348df0fe4a6f2edb6bdbda93755de4..dfc21546c0b65c1dff6aba75a3e467e3985f1e9d 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -690,7 +690,8 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = { (1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) | (1 << V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO) | (1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) | - (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) + (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) | + (1 << V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA) ), .qmenu = mpeg_video_vidc_extradata, }, @@ -1759,6 +1760,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) switch (ctrl->val) { case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO: case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP: + case V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA: inst->bufq[OUTPUT_PORT].num_planes = 2; break; case V4L2_MPEG_VIDC_EXTRADATA_LTR: @@ -1822,7 +1824,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME: property_id = HAL_CONFIG_VENC_USELTRFRAME; use_ltr.ref_ltr = ctrl->val; - use_ltr.use_constraint = false; + use_ltr.use_constraint = true; use_ltr.frames = 0; pdata = &use_ltr; break; @@ -2085,6 +2087,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) enable.enable = 0; pdata = &enable; inst->clk_data.low_latency_mode = (bool) enable.enable; + msm_dcvs_try_enable(inst); break; } case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index ce074ef26ec0d71025b3301005da04fb2dcf7208..c384405fe0e44079fadb8695639c662dce7191ce 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -931,9 +931,9 @@ int msm_vidc_set_internal_config(struct msm_vidc_inst *inst) rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) && (codec != 
V4L2_PIX_FMT_VP8)) { if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && - mbps < CBR_MB_LIMIT) || + mbps <= CBR_MB_LIMIT) || (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR && - mbps < CBR_VFR_MB_LIMIT)) + mbps <= CBR_VFR_MB_LIMIT)) hrd_buf_size.vbv_hdr_buf_size = 500; else hrd_buf_size.vbv_hdr_buf_size = 1000; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index f1788d635b1c646feec2ad93ce2cd7ff0695075d..749ffea6660bfb2afb18b9e79001e668f9f53a6f 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -185,14 +185,6 @@ static int fill_dynamic_stats(struct msm_vidc_inst *inst, vote_data->input_cr = min_input_cr; vote_data->use_dpb_read = false; - /* Check if driver can vote for lower bus BW */ - if (inst->clk_data.load < inst->clk_data.load_norm) { - vote_data->compression_ratio = max_cr; - vote_data->complexity_factor = min_cf; - vote_data->input_cr = max_input_cr; - vote_data->use_dpb_read = true; - } - dprintk(VIDC_PROF, "Input CR = %d Recon CR = %d Complexity Factor = %d\n", vote_data->input_cr, vote_data->compression_ratio, @@ -289,15 +281,12 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) vote_data[i].domain = get_hal_domain(inst->session_type); vote_data[i].codec = get_hal_codec(codec); - vote_data[i].input_width = max(inst->prop.width[OUTPUT_PORT], - inst->prop.width[OUTPUT_PORT]); - vote_data[i].input_height = max(inst->prop.height[OUTPUT_PORT], - inst->prop.height[OUTPUT_PORT]); - vote_data[i].output_width = max(inst->prop.width[CAPTURE_PORT], - inst->prop.width[OUTPUT_PORT]); - vote_data[i].output_height = - max(inst->prop.height[CAPTURE_PORT], - inst->prop.height[OUTPUT_PORT]); + vote_data[i].input_width = inst->prop.width[OUTPUT_PORT]; + vote_data[i].input_height = inst->prop.height[OUTPUT_PORT]; + vote_data[i].output_width = inst->prop.width[CAPTURE_PORT]; + vote_data[i].output_height = 
inst->prop.height[CAPTURE_PORT]; + vote_data[i].rotation = + msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE); vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC || codec == V4L2_PIX_FMT_VP9) ? 32 : 16; vote_data[i].b_frames_enabled = @@ -313,6 +302,9 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) vote_data[i].bitrate = vote_data[i].bitrate / inst->prop.fps * vote_data[i].fps; } + } else if (inst->session_type == MSM_VIDC_DECODER) { + vote_data[i].bitrate = + filled_len * vote_data[i].fps * 8; } vote_data[i].power_mode = 0; @@ -640,7 +632,7 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, struct allowed_clock_rates_table *allowed_clks_tbl = NULL; u64 rate = 0, fps; struct clock_data *dcvs = NULL; - u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7; + u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 5; core = inst->core; dcvs = &inst->clk_data; @@ -687,8 +679,8 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; - /* 10 / 7 is overhead factor */ - vsp_cycles += ((fps * filled_len * 8) * 10) / 7; + /* vsp perf is about 0.5 bits/cycle */ + vsp_cycles += ((fps * filled_len * 8) * 10) / 5; fw_cycles = fps * inst->core->resources.fw_cycles; @@ -760,6 +752,7 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core) "msm_vidc_clock_voting %d\n", msm_vidc_clock_voting); freq_core_max = msm_vidc_clock_voting; + decrement = false; break; } @@ -767,6 +760,7 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core) dprintk(VIDC_PROF, "Found an instance with Turbo request\n"); freq_core_max = msm_vidc_max_freq(core); + decrement = false; break; } /* increment even if one session requested for it */ @@ -1204,9 +1198,9 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES || (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR && - mbps < CBR_MB_LIMIT) || + mbps <= CBR_MB_LIMIT) || (rc_mode == 
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR && - mbps < CBR_VFR_MB_LIMIT)) { + mbps <= CBR_VFR_MB_LIMIT)) { pdata.video_work_route = 1; dprintk(VIDC_DBG, "Configured work route = 1"); } @@ -1335,25 +1329,15 @@ int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) } } else if (inst->session_type == MSM_VIDC_ENCODER) { u32 codec = inst->fmts[CAPTURE_PORT].fourcc; - u32 width = inst->prop.width[OUTPUT_PORT]; pdata.video_work_mode = VIDC_WORK_MODE_2; switch (codec) { case V4L2_PIX_FMT_VP8: - { - if (width <= 3840) { - pdata.video_work_mode = VIDC_WORK_MODE_1; - goto decision_done; - } - break; - } case V4L2_PIX_FMT_TME: - { pdata.video_work_mode = VIDC_WORK_MODE_1; goto decision_done; } - } } else { return -EINVAL; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 6c19dba19b59b8031bdf491ede0e74fcf1878e38..62741e9f1c44bc9bb289885d2fae8f1159db1472 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -60,7 +60,7 @@ const char *const mpeg_video_vidc_extradata[] = { "Extradata LTR", "Extradata macroblock metadata", "Extradata VQZip SEI", - "Extradata YUV Stats", + "Extradata HDR10+ Metadata", "Extradata ROI QP", "Extradata output crop", "Extradata display colour SEI", @@ -2240,7 +2240,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data) msm_vidc_handle_hw_error(core); if (response->status == VIDC_ERR_NOC_ERROR) { dprintk(VIDC_WARN, "Got NOC error"); - MSM_VIDC_ERROR(false); + MSM_VIDC_ERROR(true); } dprintk(VIDC_DBG, "Calling core_release\n"); @@ -5343,6 +5343,9 @@ enum hal_extradata_id msm_comm_get_hal_extradata_index( case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO: ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO; break; + case V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA: + ret = HAL_EXTRADATA_HDR10PLUS_METADATA; + break; default: dprintk(VIDC_WARN, "Extradata not found: %d\n", index); break; diff --git 
a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 23def7663242f3a87e243ae500c26f7f90b3ee90..954bb65d0668988ea15991075922e87ca1c1a0cc 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -25,7 +25,7 @@ #define FRAME_QUALITY_STEP 1 #define HEIC_GRID_DIMENSION 512 #define CBR_MB_LIMIT (((1280+15)/16)*((720+15)/16)*30) -#define CBR_VFR_MB_LIMIT (((640+15)/16)*((480+15)/16)*30) +#define CBR_VFR_MB_LIMIT (((1280+15)/16)*((720+15)/16)*30) struct vb2_buf_entry { struct list_head list; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c index 531ad8ceb67823bff3f4cbc0b00997e33e7c5d35..5c5bbdfcb1ed9d9d1cc48f6505bf226aac9b9762 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c @@ -79,6 +79,18 @@ static struct msm_vidc_codec_data sm8150_codec_data[] = { CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 10, 200, 200), }; +static struct msm_vidc_codec_data sdmmagpie_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 10, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 10, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_ENCODER, 10, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_VIDC_ENCODER, 0, 540, 540), + CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 10, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 10, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 10, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_DECODER, 10, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 10, 200, 200), +}; + static struct msm_vidc_codec_data sdm845_codec_data[] = { CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320), CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 125, 675, 320), @@ -264,6 +276,154 @@ static struct msm_vidc_common_data 
sm8150_common_data[] = { }, }; +static struct msm_vidc_common_data sdmmagpie_common_data_v0[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 2, /* + * As per design driver allows 3rd + * instance as well since the secure + * flags were updated later for the + * current instance. Hence total + * secure sessions would be + * max-secure-instances + 1. + */ + }, + { + .key = "qcom,max-hw-load", + .value = 3110400, /* 4096x2160@90 */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, + }, + { + .key = "qcom,max-hq-frames-per-sec", + .value = 60, + }, + { + .key = "qcom,max-b-frame-size", + .value = 8160, + }, + { + .key = "qcom,max-b-frames-per-sec", + .value = 60, + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,domain-cvp", + .value = 1, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, +}; + +static struct msm_vidc_common_data sdmmagpie_common_data_v1[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 2, /* + * As per design driver allows 3rd + * instance as well since the secure + * flags were updated later for the + * current instance. Hence total + * secure sessions would be + * max-secure-instances + 1. 
+ */ + }, + { + .key = "qcom,max-hw-load", + .value = 1281600, /* 4k@30 Decode + 1080p@30 Encode */ + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, + }, + { + .key = "qcom,max-hq-frames-per-sec", + .value = 60, + }, + { + .key = "qcom,max-b-frame-size", + .value = 8160, + }, + { + .key = "qcom,max-b-frames-per-sec", + .value = 60, + }, + { + .key = "qcom,power-collapse-delay", + .value = 1500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 1000, + }, + { + .key = "qcom,debug-timeout", + .value = 0, + }, + { + .key = "qcom,domain-cvp", + .value = 1, + }, + { + .key = "qcom,decode-batching", + .value = 1, + }, + { + .key = "qcom,dcvs", + .value = 1, + }, + { + .key = "qcom,fw-cycles", + .value = 733003, + }, +}; + static struct msm_vidc_common_data sdm845_common_data[] = { { .key = "qcom,never-unload-fw", @@ -477,6 +637,22 @@ static struct msm_vidc_platform_data sm8150_data = { .vpu_ver = VPU_VERSION_5, }; +static struct msm_vidc_platform_data sdmmagpie_data = { + .codec_data = sdmmagpie_codec_data, + .codec_data_length = ARRAY_SIZE(sdmmagpie_codec_data), + .common_data = sdmmagpie_common_data_v0, + .common_data_length = ARRAY_SIZE(sdmmagpie_common_data_v0), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .gcc_register_base = GCC_VIDEO_AXI_REG_START_ADDR, + .gcc_register_size = GCC_VIDEO_AXI_REG_SIZE, + .vpu_ver = VPU_VERSION_5, +}; + static struct msm_vidc_platform_data sdm845_data = { .codec_data = sdm845_codec_data, .codec_data_length = ARRAY_SIZE(sdm845_codec_data), @@ -518,6 +694,10 @@ static const struct of_device_id msm_vidc_dt_match[] = { .compatible = "qcom,sm8150-vidc", .data = &sm8150_data, }, + { + .compatible = "qcom,sdmmagpie-vidc", + .data = &sdmmagpie_data, + }, { .compatible = "qcom,sdm845-vidc", .data = 
&sdm845_data, @@ -603,6 +783,16 @@ void *vidc_get_drv_data(struct device *dev) driver_data->common_data_length = ARRAY_SIZE(sdm670_common_data_v1); } + } else if (!strcmp(match->compatible, "qcom,sdmmagpie-vidc")) { + rc = msm_vidc_read_efuse(driver_data, dev); + if (rc) + goto exit; + + if (driver_data->sku_version == SKU_VERSION_1) { + driver_data->common_data = sdmmagpie_common_data_v1; + driver_data->common_data_length = + ARRAY_SIZE(sdmmagpie_common_data_v1); + } } exit: diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h index 82a177565ca74f9893ac1a2af284117c698c0068..0077de19ae679db83e88576b387ea4a8b860b59d 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi.h @@ -218,6 +218,9 @@ struct hfi_extradata_header { (HFI_PROPERTY_PARAM_VENC_OX_START + 0x006) #define HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA \ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x008) +#define HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA \ + (HFI_PROPERTY_PARAM_VENC_OX_START + 0x00A) + #define HFI_PROPERTY_CONFIG_VENC_OX_START \ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000) #define HFI_PROPERTY_PARAM_VPE_OX_START \ diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index ac7c1ee8c63e6ab806e55cd534b2d3ab3f01c1c3..b6315220861d492b2b469d46c8f4d508f221272e 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -114,6 +114,7 @@ enum hal_extradata_id { HAL_EXTRADATA_VUI_DISPLAY_INFO, HAL_EXTRADATA_VPX_COLORSPACE, HAL_EXTRADATA_UBWC_CR_STATS_INFO, + HAL_EXTRADATA_HDR10PLUS_METADATA, }; enum hal_property { @@ -1396,6 +1397,7 @@ struct vidc_bus_vote_data { int num_formats; /* 1 = DPB-OPB unified; 2 = split */ int input_height, input_width, fps, bitrate; int output_height, output_width; + int rotation; int compression_ratio; int complexity_factor; int input_cr; diff --git 
a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index e2fe8db9bb950b15d4cab0ab3d9724e6804d2dd1..e0f40389ca83de4a673c841ced201495ded37de6 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -365,10 +365,12 @@ struct hfi_buffer_info { (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x010) #define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012) -#define HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY \ +#define HFI_PROPERTY_CONFIG_HEIC_FRAME_CROP_INFO \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x013) -#define HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE \ +#define HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x014) +#define HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE \ + (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x015) #define HFI_PROPERTY_PARAM_VPE_COMMON_START \ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000) diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 79773fc69f533c8d7fe0d34053264e9eda467dbb..9a00a6e4b58426e3883f760fdc9108d13e2cd6a3 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -565,4 +565,5 @@ source "drivers/misc/mic/Kconfig" source "drivers/misc/genwqe/Kconfig" source "drivers/misc/echo/Kconfig" source "drivers/misc/cxl/Kconfig" +source "drivers/misc/fpr_FingerprintCard/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index eca16c6aeecc272b816ff3dd6d5b18464c95096f..e4db9929966601b23c195dbbafe2ad9fe959be06 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -56,9 +56,10 @@ obj-$(CONFIG_QSEECOM) += qseecom.o obj-$(CONFIG_ECHO) += echo/ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o obj-$(CONFIG_CXL_BASE) += cxl/ -obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o -obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o -obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o +obj-$(CONFIG_ASPEED_LPC_CTRL) += 
aspeed-lpc-ctrl.o +obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o +obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o +obj-$(CONFIG_FPR_FPC) += fpr_FingerprintCard/ obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o diff --git a/drivers/misc/fpr_FingerprintCard/Kconfig b/drivers/misc/fpr_FingerprintCard/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c9599e6bb4a35172e5edfa9b856c45439d6a49f7 --- /dev/null +++ b/drivers/misc/fpr_FingerprintCard/Kconfig @@ -0,0 +1,10 @@ +# +# FingerprintCard fingerprint driver +# +menu "FingerprintCard fingerprint driver" +config FPR_FPC + default n + tristate "FPC_BTP fingerprint sensor support" + depends on SPI_MASTER + +endmenu diff --git a/drivers/misc/fpr_FingerprintCard/Makefile b/drivers/misc/fpr_FingerprintCard/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..96681eb834a62e863f66117745ab1b71c2ef076b --- /dev/null +++ b/drivers/misc/fpr_FingerprintCard/Makefile @@ -0,0 +1,5 @@ +# Makefile for FingerprintCard fingerprint driver + +fpc1020-objs := fpc1020_platform_tee.o +obj-$(CONFIG_FPR_FPC) += fpc1020.o + diff --git a/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c new file mode 100644 index 0000000000000000000000000000000000000000..887c8eb2f9ee5401c628348fa3644a11f541bfbb --- /dev/null +++ b/drivers/misc/fpr_FingerprintCard/fpc1020_platform_tee.c @@ -0,0 +1,683 @@ +/* + * FPC1020 Fingerprint sensor device driver + * + * This driver will control the platform resources that the FPC fingerprint + * sensor needs to operate. The major things are probing the sensor to check + * that it is actually connected and let the Kernel know this and with that also + * enabling and disabling of regulators, controlling GPIOs such as sensor reset + * line, sensor IRQ line. 
+ * + * The driver will expose most of its available functionality in sysfs which + * enables dynamic control of these features from eg. a user space process. + * + * The sensor's IRQ events will be pushed to Kernel's event handling system and + * are exposed in the drivers event node. + * + * This driver will NOT send any commands to the sensor it only controls the + * electrical parts. + * + * + * Copyright (c) 2015 Fingerprint Cards AB + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License Version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define FPC_TTW_HOLD_TIME 1000 +#define RESET_LOW_SLEEP_MIN_US 5000 +#define RESET_LOW_SLEEP_MAX_US (RESET_LOW_SLEEP_MIN_US + 100) +#define RESET_HIGH_SLEEP1_MIN_US 100 +#define RESET_HIGH_SLEEP1_MAX_US (RESET_HIGH_SLEEP1_MIN_US + 100) +#define RESET_HIGH_SLEEP2_MIN_US 5000 +#define RESET_HIGH_SLEEP2_MAX_US (RESET_HIGH_SLEEP2_MIN_US + 100) +#define PWR_ON_SLEEP_MIN_US 100 +#define PWR_ON_SLEEP_MAX_US (PWR_ON_SLEEP_MIN_US + 900) +#define NUM_PARAMS_REG_ENABLE_SET 2 + +#define RELEASE_WAKELOCK_W_V "release_wakelock_with_verification" +#define RELEASE_WAKELOCK "release_wakelock" +#define START_IRQS_RECEIVED_CNT "start_irqs_received_counter" + +static const char * const pctl_names[] = { + "fpc1020_reset_reset", + "fpc1020_reset_active", + "fpc1020_irq_active", +}; + +struct vreg_config { + char *name; + unsigned long vmin; + unsigned long vmax; + int ua_load; +}; + +static const struct vreg_config vreg_conf[] = { + { "vdd_ana", 1800000UL, 1800000UL, 6000, }, + { "vcc_spi", 1800000UL, 1800000UL, 10, }, + { "vdd_io", 1800000UL, 1800000UL, 6000, }, +}; + +struct fpc1020_data { + struct device *dev; + struct pinctrl *fingerprint_pinctrl; + struct pinctrl_state **pinctrl_state; + struct regulator **vreg; + struct 
wakeup_source ttw_wl; + struct mutex lock; /* To set/get exported values in sysfs */ + int irq_gpio; + int rst_gpio; + int nbr_irqs_received; + int nbr_irqs_received_counter_start; + bool prepared; + atomic_t wakeup_enabled; /* Used both in ISR and non-ISR */ +}; + +static int vreg_setup(struct fpc1020_data *fpc1020, const char *name, + bool enable) +{ + size_t i; + int rc; + struct regulator *vreg; + struct device *dev = fpc1020->dev; + + for (i = 0; i < ARRAY_SIZE(vreg_conf); i++) { + const char *n = vreg_conf[i].name; + + if (!memcmp(n, name, strlen(n))) + goto found; + } + + dev_err(dev, "Regulator %s not found\n", name); + + return -EINVAL; + +found: + vreg = fpc1020->vreg[i]; + if (enable) { + if (!vreg) { + vreg = devm_regulator_get(dev, name); + if (IS_ERR_OR_NULL(vreg)) { + dev_err(dev, "Unable to get %s\n", name); + return PTR_ERR(vreg); + } + } + + if (regulator_count_voltages(vreg) > 0) { + rc = regulator_set_voltage(vreg, vreg_conf[i].vmin, + vreg_conf[i].vmax); + if (rc) + dev_err(dev, + "Unable to set voltage on %s, %d\n", + name, rc); + } + + rc = regulator_set_load(vreg, vreg_conf[i].ua_load); + if (rc < 0) + dev_err(dev, "Unable to set current on %s, %d\n", + name, rc); + + rc = regulator_enable(vreg); + if (rc) { + dev_err(dev, "error enabling %s: %d\n", name, rc); + vreg = NULL; + } + fpc1020->vreg[i] = vreg; + } else { + if (vreg) { + if (regulator_is_enabled(vreg)) { + regulator_disable(vreg); + dev_dbg(dev, "disabled %s\n", name); + } + fpc1020->vreg[i] = NULL; + } + rc = 0; + } + + return rc; +} + +/* + * sysfs node for controlling clocks. + * + * This is disabled in platform variant of this driver but kept for + * backwards compatibility. Only prints a debug print that it is + * disabled. 
+ */ +static ssize_t clk_enable_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + dev_dbg(dev, + "clk_enable sysfs node not enabled in platform driver\n"); + + return count; +} +static DEVICE_ATTR(clk_enable, 0200, NULL, clk_enable_set); + +/* + * Will try to select the set of pins (GPIOS) defined in a pin control node of + * the device tree named @p name. + * + * The node can contain several eg. GPIOs that is controlled when selecting it. + * The node may activate or deactivate the pins it contains, the action is + * defined in the device tree node itself and not here. The states used + * internally is fetched at probe time. + * + * @see pctl_names + * @see fpc1020_probe + */ +static int select_pin_ctl(struct fpc1020_data *fpc1020, const char *name) +{ + size_t i; + int rc; + struct device *dev = fpc1020->dev; + + for (i = 0; i < ARRAY_SIZE(pctl_names); i++) { + const char *n = pctl_names[i]; + + if (!memcmp(n, name, strlen(n))) { + rc = pinctrl_select_state(fpc1020->fingerprint_pinctrl, + fpc1020->pinctrl_state[i]); + if (rc) + dev_err(dev, "cannot select '%s'\n", name); + else + dev_dbg(dev, "Selected '%s'\n", name); + goto exit; + } + } + + rc = -EINVAL; + dev_err(dev, "%s:'%s' not found\n", __func__, name); + +exit: + return rc; +} + +static ssize_t pinctl_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + int rc; + + mutex_lock(&fpc1020->lock); + rc = select_pin_ctl(fpc1020, buf); + mutex_unlock(&fpc1020->lock); + + return rc ? 
rc : count; +} +static DEVICE_ATTR(pinctl_set, 0200, NULL, pinctl_set); + +static ssize_t regulator_enable_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + char op; + char name[16]; + int rc; + bool enable; + + if (sscanf(buf, "%15[^,],%c", name, &op) != NUM_PARAMS_REG_ENABLE_SET) + return -EINVAL; + if (op == 'e') + enable = true; + else if (op == 'd') + enable = false; + else + return -EINVAL; + + mutex_lock(&fpc1020->lock); + rc = vreg_setup(fpc1020, name, enable); + mutex_unlock(&fpc1020->lock); + + return rc ? rc : count; +} +static DEVICE_ATTR(regulator_enable, 0200, NULL, regulator_enable_set); + +static int hw_reset(struct fpc1020_data *fpc1020) +{ + int irq_gpio; + int rc; + + irq_gpio = gpio_get_value(fpc1020->irq_gpio); + + rc = select_pin_ctl(fpc1020, "fpc1020_reset_active"); + + if (rc) + goto exit; + + usleep_range(RESET_HIGH_SLEEP1_MIN_US, RESET_HIGH_SLEEP1_MAX_US); + + rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset"); + + if (rc) + goto exit; + usleep_range(RESET_LOW_SLEEP_MIN_US, RESET_LOW_SLEEP_MAX_US); + + rc = select_pin_ctl(fpc1020, "fpc1020_reset_active"); + if (rc) + goto exit; + usleep_range(RESET_HIGH_SLEEP2_MIN_US, RESET_HIGH_SLEEP2_MAX_US); + + irq_gpio = gpio_get_value(fpc1020->irq_gpio); + +exit: + return rc; +} + +static ssize_t hw_reset_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc = -EINVAL; + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + + if (!memcmp(buf, "reset", strlen("reset"))) { + mutex_lock(&fpc1020->lock); + rc = hw_reset(fpc1020); + mutex_unlock(&fpc1020->lock); + } else { + return rc; + } + + return rc ? rc : count; +} +static DEVICE_ATTR(hw_reset, 0200, NULL, hw_reset_set); + +/* + * Will setup GPIOs, and regulators to correctly initialize the touch sensor to + * be ready for work. 
+ * + * In the correct order according to the sensor spec this function will + * enable/disable regulators, and reset line, all to set the sensor in a + * correct power on or off state "electrical" wise. + * + * @see device_prepare_set + * @note This function will not send any commands to the sensor it will only + * control it "electrically". + */ +static int device_prepare(struct fpc1020_data *fpc1020, bool enable) +{ + int rc = 0; + + mutex_lock(&fpc1020->lock); + if (enable && !fpc1020->prepared) { + fpc1020->prepared = true; + select_pin_ctl(fpc1020, "fpc1020_reset_reset"); + + rc = vreg_setup(fpc1020, "vcc_spi", true); + if (rc) + goto exit; + + rc = vreg_setup(fpc1020, "vdd_io", true); + if (rc) + goto exit_1; + + rc = vreg_setup(fpc1020, "vdd_ana", true); + if (rc) + goto exit_2; + + usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US); + + (void)select_pin_ctl(fpc1020, "fpc1020_reset_active"); + } else if (!enable && fpc1020->prepared) { + rc = 0; + (void)select_pin_ctl(fpc1020, "fpc1020_reset_reset"); + + usleep_range(PWR_ON_SLEEP_MIN_US, PWR_ON_SLEEP_MAX_US); + + (void)vreg_setup(fpc1020, "vdd_ana", false); +exit_2: + (void)vreg_setup(fpc1020, "vdd_io", false); +exit_1: + (void)vreg_setup(fpc1020, "vcc_spi", false); +exit: + fpc1020->prepared = false; + } + + mutex_unlock(&fpc1020->lock); + + return rc; +} + +/* + * sysfs node to enable/disable (power up/power down) the touch sensor + * + * @see device_prepare + */ +static ssize_t device_prepare_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc; + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + + if (!memcmp(buf, "enable", strlen("enable"))) + rc = device_prepare(fpc1020, true); + else if (!memcmp(buf, "disable", strlen("disable"))) + rc = device_prepare(fpc1020, false); + else + return -EINVAL; + + return rc ? 
rc : count; +} +static DEVICE_ATTR(device_prepare, 0200, NULL, device_prepare_set); + +/** + * sysfs node for controlling whether the driver is allowed + * to wake up the platform on interrupt. + */ +static ssize_t wakeup_enable_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + ssize_t ret = count; + + mutex_lock(&fpc1020->lock); + if (!memcmp(buf, "enable", strlen("enable"))) + atomic_set(&fpc1020->wakeup_enabled, 1); + else if (!memcmp(buf, "disable", strlen("disable"))) + atomic_set(&fpc1020->wakeup_enabled, 0); + else + ret = -EINVAL; + mutex_unlock(&fpc1020->lock); + + return ret; +} +static DEVICE_ATTR(wakeup_enable, 0200, NULL, wakeup_enable_set); + + +/* + * sysfs node for controlling the wakelock. + */ +static ssize_t handle_wakelock_cmd(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + ssize_t ret = count; + + mutex_lock(&fpc1020->lock); + if (!memcmp(buf, RELEASE_WAKELOCK_W_V, + min(count, strlen(RELEASE_WAKELOCK_W_V)))) { + if (fpc1020->nbr_irqs_received_counter_start == + fpc1020->nbr_irqs_received) { + __pm_relax(&fpc1020->ttw_wl); + } else { + dev_dbg(dev, "Ignore releasing of wakelock %d != %d", + fpc1020->nbr_irqs_received_counter_start, + fpc1020->nbr_irqs_received); + } + } else if (!memcmp(buf, RELEASE_WAKELOCK, min(count, + strlen(RELEASE_WAKELOCK)))) { + __pm_relax(&fpc1020->ttw_wl); + } else if (!memcmp(buf, START_IRQS_RECEIVED_CNT, + min(count, strlen(START_IRQS_RECEIVED_CNT)))) { + fpc1020->nbr_irqs_received_counter_start = + fpc1020->nbr_irqs_received; + } else + ret = -EINVAL; + mutex_unlock(&fpc1020->lock); + + return ret; +} +static DEVICE_ATTR(handle_wakelock, 0200, NULL, handle_wakelock_cmd); + +/* + * sysf node to check the interrupt status of the sensor, the interrupt + * handler should perform sysf_notify to allow userland to poll the node. 
+ */ +static ssize_t irq_get(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + int irq = gpio_get_value(fpc1020->irq_gpio); + + return scnprintf(buf, PAGE_SIZE, "%i\n", irq); +} + +/* + * writing to the irq node will just drop a printk message + * and return success, used for latency measurement. + */ +static ssize_t irq_ack(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpc1020_data *fpc1020 = dev_get_drvdata(dev); + + dev_dbg(fpc1020->dev, "%s\n", __func__); + + return count; +} +static DEVICE_ATTR(irq, 0600 | 0200, irq_get, irq_ack); + +static struct attribute *attributes[] = { + &dev_attr_pinctl_set.attr, + &dev_attr_device_prepare.attr, + &dev_attr_regulator_enable.attr, + &dev_attr_hw_reset.attr, + &dev_attr_wakeup_enable.attr, + &dev_attr_handle_wakelock.attr, + &dev_attr_clk_enable.attr, + &dev_attr_irq.attr, + NULL +}; + +static const struct attribute_group attribute_group = { + .attrs = attributes, +}; + +static irqreturn_t fpc1020_irq_handler(int irq, void *handle) +{ + struct fpc1020_data *fpc1020 = handle; + + pr_info("fpc1020 irq handler: %s\n", __func__); + mutex_lock(&fpc1020->lock); + if (atomic_read(&fpc1020->wakeup_enabled)) { + fpc1020->nbr_irqs_received++; + __pm_wakeup_event(&fpc1020->ttw_wl, + msecs_to_jiffies(FPC_TTW_HOLD_TIME)); + } + mutex_unlock(&fpc1020->lock); + + sysfs_notify(&fpc1020->dev->kobj, NULL, dev_attr_irq.attr.name); + + return IRQ_HANDLED; +} + +static int fpc1020_request_named_gpio(struct fpc1020_data *fpc1020, + const char *label, int *gpio) +{ + struct device *dev = fpc1020->dev; + struct device_node *np = dev->of_node; + int rc; + + rc = of_get_named_gpio(np, label, 0); + + if (rc < 0) { + dev_err(dev, "failed to get '%s'\n", label); + return rc; + } + *gpio = rc; + + rc = devm_gpio_request(dev, *gpio, label); + if (rc) { + dev_err(dev, "failed to request gpio %d\n", *gpio); + return rc; + } + 
dev_dbg(dev, "%s %d\n", label, *gpio); + + return 0; +} + +static int fpc1020_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int rc = 0; + size_t i; + int irqf; + struct fpc1020_data *fpc1020 = devm_kzalloc(dev, sizeof(*fpc1020), + GFP_KERNEL); + if (!fpc1020) { + rc = -ENOMEM; + goto exit; + } + + + fpc1020->dev = dev; + platform_set_drvdata(pdev, fpc1020); + + rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_irq", + &fpc1020->irq_gpio); + if (rc) + goto exit; + rc = fpc1020_request_named_gpio(fpc1020, "fpc,gpio_rst", + &fpc1020->rst_gpio); + if (rc) + goto exit; + + fpc1020->fingerprint_pinctrl = devm_pinctrl_get(dev); + if (IS_ERR(fpc1020->fingerprint_pinctrl)) { + if (PTR_ERR(fpc1020->fingerprint_pinctrl) == -EPROBE_DEFER) { + dev_info(dev, "pinctrl not ready\n"); + rc = -EPROBE_DEFER; + goto exit; + } + dev_err(dev, "Target does not use pinctrl\n"); + fpc1020->fingerprint_pinctrl = NULL; + rc = -EINVAL; + goto exit; + } + + for (i = 0; i < ARRAY_SIZE(pctl_names); i++) { + const char *n = pctl_names[i]; + struct pinctrl_state *state = + pinctrl_lookup_state(fpc1020->fingerprint_pinctrl, n); + if (IS_ERR(state)) { + dev_err(dev, "cannot find '%s'\n", n); + rc = -EINVAL; + goto exit; + } + dev_info(dev, "found pin control %s\n", n); + fpc1020->pinctrl_state[i] = state; + } + + rc = select_pin_ctl(fpc1020, "fpc1020_reset_reset"); + if (rc) + goto exit; + rc = select_pin_ctl(fpc1020, "fpc1020_irq_active"); + if (rc) + goto exit; + + atomic_set(&fpc1020->wakeup_enabled, 0); + + irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT; + if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) { + irqf |= IRQF_NO_SUSPEND; + device_init_wakeup(dev, 1); + } + + mutex_init(&fpc1020->lock); + rc = devm_request_threaded_irq(dev, gpio_to_irq(fpc1020->irq_gpio), + NULL, fpc1020_irq_handler, irqf, + dev_name(dev), fpc1020); + if (rc) { + dev_err(dev, "could not request irq %d\n", + gpio_to_irq(fpc1020->irq_gpio)); + goto exit; + } + + dev_info(dev, 
"requested irq %d\n", gpio_to_irq(fpc1020->irq_gpio)); + + /* Request that the interrupt should be wakeable */ + enable_irq_wake(gpio_to_irq(fpc1020->irq_gpio)); + + wakeup_source_init(&fpc1020->ttw_wl, "fpc_ttw_wl"); + + rc = sysfs_create_group(&dev->kobj, &attribute_group); + if (rc) { + dev_err(dev, "could not create sysfs\n"); + goto exit; + } + + if (of_property_read_bool(dev->of_node, "fpc,enable-on-boot")) { + dev_info(dev, "Enabling hardware\n"); + (void)device_prepare(fpc1020, true); + } + + rc = hw_reset(fpc1020); + + dev_info(dev, "%s: ok\n", __func__); + +exit: + return rc; +} + +static int fpc1020_remove(struct platform_device *pdev) +{ + struct fpc1020_data *fpc1020 = platform_get_drvdata(pdev); + + sysfs_remove_group(&pdev->dev.kobj, &attribute_group); + mutex_destroy(&fpc1020->lock); + wakeup_source_trash(&fpc1020->ttw_wl); + (void)vreg_setup(fpc1020, "vdd_ana", false); + (void)vreg_setup(fpc1020, "vdd_io", false); + (void)vreg_setup(fpc1020, "vcc_spi", false); + dev_info(&pdev->dev, "%s\n", __func__); + + return 0; +} + +static const struct of_device_id fpc1020_of_match[] = { + { .compatible = "fpc,fpc1020", }, + {} +}; +MODULE_DEVICE_TABLE(of, fpc1020_of_match); + +static struct platform_driver fpc1020_driver = { + .driver = { + .name = "fpc1020", + .owner = THIS_MODULE, + .of_match_table = fpc1020_of_match, + }, + .probe = fpc1020_probe, + .remove = fpc1020_remove, +}; + +static int __init fpc1020_init(void) +{ + int rc = platform_driver_register(&fpc1020_driver); + + if (!rc) + pr_info("%s OK\n", __func__); + else + pr_err("%s %d\n", __func__, rc); + + return rc; +} + +static void __exit fpc1020_exit(void) +{ + pr_info("%s\n", __func__); + platform_driver_unregister(&fpc1020_driver); +} + +module_init(fpc1020_init); +module_exit(fpc1020_exit); + + +MODULE_DESCRIPTION("FPC1020 Fingerprint sensor device driver."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c index 
6efd353ac298e0948cfa5cd6ba170a71f98615ce..123e0e1b907190c3e90f8a911ab01acf7619f941 100644 --- a/drivers/misc/hdcp_qseecom.c +++ b/drivers/misc/hdcp_qseecom.c @@ -146,7 +146,8 @@ const char *HdcpErrors[] = { "HDCP_GET_CONTENT_LEVEL_FAILED", "HDCP_STREAMID_INUSE", "HDCP_STREAM_NOT_FOUND", - "HDCP_FORCE_ENCRYPTION_FAILED" + "HDCP_FORCE_ENCRYPTION_FAILED", + "HDCP_STREAMNUMBER_INUSE" }; /* flags set by tz in response message */ @@ -218,6 +219,8 @@ enum { hdcp_cmd_session_init = SERVICE_CREATE_CMD(16), hdcp_cmd_session_deinit = SERVICE_CREATE_CMD(17), hdcp_cmd_start_auth = SERVICE_CREATE_CMD(18), + hdcp_cmd_session_open_stream = SERVICE_CREATE_CMD(20), + hdcp_cmd_session_close_stream = SERVICE_CREATE_CMD(21), hdcp_cmd_force_encryption = SERVICE_CREATE_CMD(22), }; @@ -509,7 +512,32 @@ struct __attribute__ ((__packed__)) hdcp_start_auth_rsp { uint8_t message[MAX_TX_MESSAGE_SIZE]; }; -struct __attribute__ ((__packed__)) hdcp_force_encryption_req { +struct __attribute__((__packed__)) hdcp_session_open_stream_req { + uint32_t commandid; + uint32_t sessionid; + uint32_t vcpayloadid; + uint32_t stream_number; + uint32_t streamMediaType; +}; + +struct __attribute__((__packed__)) hdcp_session_open_stream_rsp { + uint32_t status; + uint32_t commandid; + uint32_t streamid; +}; + +struct __attribute__((__packed__)) hdcp_session_close_stream_req { + uint32_t commandid; + uint32_t sessionid; + uint32_t streamid; +}; + +struct __attribute__((__packed__)) hdcp_session_close_stream_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__((__packed__)) hdcp_force_encryption_req { uint32_t commandid; uint32_t ctxhandle; uint32_t enable; @@ -1073,8 +1101,6 @@ static int hdcp2_app_process_msg(struct hdcp2_handle *handle) /* check if it's a repeater */ if (rsp_buf->flag == HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) handle->app_data.repeater_flag = true; - else - handle->app_data.repeater_flag = false; handle->app_data.response.data = rsp_buf->msg; 
handle->app_data.response.length = rsp_buf->msglen; @@ -1248,6 +1274,106 @@ int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, return rc; } +static int hdcp2_open_stream_helper(struct hdcp2_handle *handle, + uint8_t vc_payload_id, + uint8_t stream_number, + uint32_t *stream_id) +{ + int rc = 0; + + hdcp2_app_init_var(session_open_stream); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->sessionid = handle->session_id; + req_buf->vcpayloadid = vc_payload_id; + req_buf->stream_number = stream_number; + req_buf->streamMediaType = 0; + + rc = hdcp2_app_process_cmd(session_open_stream); + if (rc) + goto error; + + *stream_id = rsp_buf->streamid; + + pr_debug("success\n"); + +error: + return rc; +} + +int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, uint8_t stream_number, + uint32_t *stream_id) +{ + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + + return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number, + stream_id); +} + +static int hdcp2_close_stream_helper(struct hdcp2_handle *handle, + uint32_t stream_id) +{ + int rc = 0; + + hdcp2_app_init_var(session_close_stream); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->sessionid = handle->session_id; + req_buf->streamid = stream_id; + + rc = hdcp2_app_process_cmd(session_close_stream); + + if (rc) + goto error; + + pr_debug("success\n"); +error: + return rc; +} + +int hdcp2_close_stream(void *ctx, uint32_t stream_id) +{ + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + 
pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + + return hdcp2_close_stream_helper(handle, stream_id); +} + void *hdcp2_init(u32 device_type) { struct hdcp2_handle *handle = NULL; diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 13a2279e2cc7029f2fde5a011e670a93ea804430..04b1f7088cc3dc582de0f457e81e0d98daaccfc3 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -1334,9 +1334,10 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data, rcvd_lstnr.sb_size)) return -EFAULT; - data->listener.id = 0; + data->listener.id = rcvd_lstnr.listener_id; if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) { - pr_err("Service is not unique and is already registered\n"); + pr_err("Service %d is not unique and failed to register\n", + rcvd_lstnr.listener_id); data->released = true; return -EBUSY; } @@ -1351,12 +1352,12 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data, new_entry->sb_length = rcvd_lstnr.sb_size; new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base; if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) { - pr_err("qseecom_set_sb_memoryfailed\n"); + pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n", + rcvd_lstnr.listener_id, rcvd_lstnr.sb_size); kzfree(new_entry); return -ENOMEM; } - data->listener.id = rcvd_lstnr.listener_id; init_waitqueue_head(&new_entry->rcv_req_wq); init_waitqueue_head(&new_entry->listener_block_app_wq); new_entry->send_resp_flag = 0; @@ -1365,6 +1366,7 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data, list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head); spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id); return ret; } @@ -1407,13 +1409,14 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) if (ret) { pr_err("scm_call() failed with err: %d (lstnr id=%d)\n", ret, data->listener.id); - 
return ret; + goto exit; } if (resp.result != QSEOS_RESULT_SUCCESS) { pr_err("Failed resp.result=%d,(lstnr id=%d)\n", resp.result, data->listener.id); - return -EPERM; + ret = -EPERM; + goto exit; } data->abort = 1; @@ -1425,10 +1428,10 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) atomic_read(&data->ioctl_count) <= 1)) { pr_err("Interrupted from abort\n"); ret = -ERESTARTSYS; - return ret; } } +exit: if (ptr_svc->dmabuf) qseecom_vaddr_unmap(ptr_svc->sb_virt, ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf); @@ -1437,6 +1440,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) kzfree(ptr_svc); data->released = true; + pr_debug("Service %d is unregistered\n", data->listener.id); return ret; } @@ -1766,6 +1770,23 @@ static void __qseecom_clean_listener_sglistinfo( } } +/* wake up listener receive request wq retry delay (ms) and max attemp count */ +#define QSEECOM_WAKE_LISTENER_RCVWQ_DELAY 10 +#define QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP 3 + +static int __qseecom_retry_wake_up_listener_rcv_wq( + struct qseecom_registered_listener_list *ptr_svc) +{ + int retry = 0; + + while (ptr_svc->rcv_req_flag == 1 && + retry++ < QSEECOM_WAKE_LISTENER_RCVWQ_MAX_ATTEMP) { + wake_up_interruptible(&ptr_svc->rcv_req_wq); + msleep(QSEECOM_WAKE_LISTENER_RCVWQ_DELAY); + } + return ptr_svc->rcv_req_flag == 1; +} + static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, struct qseecom_command_scm_resp *resp) { @@ -1833,6 +1854,15 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, status = QSEOS_RESULT_FAILURE; goto err_resp; } + + if (ptr_svc->rcv_req_flag == 1 && + __qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) { + pr_err("Service %d is not ready to receive request\n", + lstnr); + rc = -ENOENT; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); /* initialize the new signal mask with all signals*/ @@ -2153,6 +2183,15 
@@ static int __qseecom_reentrancy_process_incomplete_cmd( status = QSEOS_RESULT_FAILURE; goto err_resp; } + + if (ptr_svc->rcv_req_flag == 1 && + __qseecom_retry_wake_up_listener_rcv_wq(ptr_svc)) { + pr_err("Service %d is not ready to receive request\n", + lstnr); + rc = -ENOENT; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); /* initialize the new signal mask with all signals*/ @@ -7731,6 +7770,7 @@ static int qseecom_release(struct inode *inode, struct file *file) data->type, data->mode, data); switch (data->type) { case QSEECOM_LISTENER_SERVICE: + pr_warn("release lsnr svc %d\n", data->listener.id); __qseecom_listener_abort_all(1); mutex_lock(&app_access_lock); ret = qseecom_unregister_listener(data); diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index fc0415771c0087264436d1ba434eccae8ef78dff..4dd0d868ff88818f7bcb8db51cac461cb6541684 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -407,13 +407,20 @@ static int sram_probe(struct platform_device *pdev) if (init_func) { ret = init_func(); if (ret) - return ret; + goto err_disable_clk; } dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", gen_pool_size(sram->pool) / 1024, sram->virt_base); return 0; + +err_disable_clk: + if (sram->clk) + clk_disable_unprepare(sram->clk); + sram_free_partitions(sram); + + return ret; } static int sram_remove(struct platform_device *pdev) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index c1403ebc0053db2034ba5a3ad00af414511ab6c5..bee1ae6f489617c001e4cb6fbe9bb77ee38450ec 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -3052,6 +3052,8 @@ void mmc_blk_cmdq_complete_rq(struct request *rq) err = mrq->cmd->error; else if (mrq->data && mrq->data->error) err = mrq->data->error; + if (cmdq_req->resp_err) + err = cmdq_req->resp_err; if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) { pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n", @@ -3073,7 
+3075,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq) * or disable state so cannot receive any completion of * other requests. */ - BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)); + WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)); /* clear pending request */ BUG_ON(!test_and_clear_bit(cmdq_req->tag, @@ -3107,7 +3109,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq) out: mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd); - if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) { + if (!err) { mmc_host_clk_release(host); wake_up(&ctx_info->wait); mmc_put_card(host->card); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index aecdfeaea9b2cd21986922830881f3ded3df6d5b..9d2751c81ad5565d69cc0b730b1df3f38e44a990 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -505,28 +505,24 @@ static int mmc_devfreq_set_target(struct device *dev, pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host), *freq, current->comm); - if ((clk_scaling->curr_freq == *freq) || - clk_scaling->skip_clk_scale_freq_update) - goto out; - - /* No need to scale the clocks if they are gated */ - if (!host->ios.clock) - goto out; - spin_lock_bh(&clk_scaling->lock); - if (clk_scaling->clk_scaling_in_progress) { - pr_debug("%s: clocks scaling is already in-progress by mmc thread\n", - mmc_hostname(host)); + if (clk_scaling->target_freq == *freq || + clk_scaling->skip_clk_scale_freq_update) { spin_unlock_bh(&clk_scaling->lock); goto out; } + clk_scaling->need_freq_change = true; clk_scaling->target_freq = *freq; clk_scaling->state = *freq < clk_scaling->curr_freq ? 
MMC_LOAD_LOW : MMC_LOAD_HIGH; spin_unlock_bh(&clk_scaling->lock); - abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort); + if (!clk_scaling->is_suspended && host->ios.clock) + abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort); + else + goto out; + if (abort) goto out; @@ -571,6 +567,7 @@ void mmc_deferred_scaling(struct mmc_host *host) { unsigned long target_freq; int err; + struct mmc_devfeq_clk_scaling clk_scaling; if (!host->clk_scaling.enable) return; @@ -580,8 +577,7 @@ void mmc_deferred_scaling(struct mmc_host *host) spin_lock_bh(&host->clk_scaling.lock); - if (host->clk_scaling.clk_scaling_in_progress || - !(host->clk_scaling.need_freq_change)) { + if (!host->clk_scaling.need_freq_change) { spin_unlock_bh(&host->clk_scaling.lock); return; } @@ -589,7 +585,12 @@ void mmc_deferred_scaling(struct mmc_host *host) atomic_inc(&host->clk_scaling.devfreq_abort); target_freq = host->clk_scaling.target_freq; - host->clk_scaling.clk_scaling_in_progress = true; + /* + * Store the clock scaling state while the lock is acquired so that + * if devfreq context modifies clk_scaling, it will get reflected only + * in the next deferred scaling check. 
+ */ + clk_scaling = host->clk_scaling; host->clk_scaling.need_freq_change = false; spin_unlock_bh(&host->clk_scaling.lock); pr_debug("%s: doing deferred frequency change (%lu) (%s)\n", @@ -597,7 +598,7 @@ void mmc_deferred_scaling(struct mmc_host *host) target_freq, current->comm); err = mmc_clk_update_freq(host, target_freq, - host->clk_scaling.state); + clk_scaling.state); if (err && err != -EAGAIN) { pr_err("%s: failed on deferred scale clocks (%d)\n", mmc_hostname(host), err); @@ -607,7 +608,6 @@ void mmc_deferred_scaling(struct mmc_host *host) mmc_hostname(host), target_freq, current->comm); } - host->clk_scaling.clk_scaling_in_progress = false; atomic_dec(&host->clk_scaling.devfreq_abort); } EXPORT_SYMBOL(mmc_deferred_scaling); @@ -737,7 +737,6 @@ int mmc_init_clk_scaling(struct mmc_host *host) spin_lock_init(&host->clk_scaling.lock); atomic_set(&host->clk_scaling.devfreq_abort, 0); host->clk_scaling.curr_freq = host->ios.clock; - host->clk_scaling.clk_scaling_in_progress = false; host->clk_scaling.need_freq_change = false; host->clk_scaling.is_busy_started = false; @@ -808,7 +807,8 @@ int mmc_suspend_clk_scaling(struct mmc_host *host) return -EINVAL; } - if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable) + if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable || + host->clk_scaling.is_suspended) return 0; if (!host->clk_scaling.devfreq) { @@ -825,7 +825,7 @@ int mmc_suspend_clk_scaling(struct mmc_host *host) mmc_hostname(host), __func__); return err; } - host->clk_scaling.enable = false; + host->clk_scaling.is_suspended = true; host->clk_scaling.total_busy_time_us = 0; @@ -879,15 +879,12 @@ int mmc_resume_clk_scaling(struct mmc_host *host) if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx]) host->clk_scaling.curr_freq = devfreq_min_clk; - host->clk_scaling.clk_scaling_in_progress = false; - host->clk_scaling.need_freq_change = false; - err = devfreq_resume_device(host->clk_scaling.devfreq); if (err) { pr_err("%s: %s: failed to 
resume devfreq (%d)\n", mmc_hostname(host), __func__, err); } else { - host->clk_scaling.enable = true; + host->clk_scaling.is_suspended = false; pr_debug("%s: devfreq resumed\n", mmc_hostname(host)); } @@ -1078,6 +1075,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) mrq->stop->resp[0], mrq->stop->resp[1], mrq->stop->resp[2], mrq->stop->resp[3]); } + mmc_host_clk_release(host); } /* * Request starter must handle retries - see @@ -1086,7 +1084,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) if (mrq->done) mrq->done(mrq); - mmc_host_clk_release(host); } EXPORT_SYMBOL(mmc_request_done); @@ -1626,7 +1623,8 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) mmc_card_removed(host->card)) { if (cmd->error && !cmd->retries && cmd->opcode != MMC_SEND_STATUS && - cmd->opcode != MMC_SEND_TUNING_BLOCK) + cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) mmc_recovery_fallback_lower_speed(host); break; } diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 46dccab26d29b3a330b8afa076a8000a776a9668..80793c74bd478fbed0d122e17077adb47c1d660a 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -496,6 +496,11 @@ void mmc_add_host_debugfs(struct mmc_host *host) &host->cmdq_thist_enabled)) goto err_node; + if (!debugfs_create_bool("crash_on_err", + 0600, root, + &host->crash_on_err)) + goto err_node; + #ifdef CONFIG_MMC_RING_BUFFER if (!debugfs_create_file("ring_buffer", 0400, root, host, &mmc_ring_buffer_fops)) diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index a6fc7f68ec0c52d585e2f7ec93d1a80d7460bb3b..999b097a688382f4b5f2b1f3460d75c25e6cdddf 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -48,9 +48,28 @@ static void mmc_host_classdev_release(struct device *dev) kfree(host); } +static int mmc_host_prepare(struct device *dev) +{ + /* + * Since mmc_host is a virtual device, we don't have 
to do anything. + * If we return a positive value, the pm framework will consider that + * the runtime suspend and system suspend of this device is same and + * will set direct_complete flag as true. We don't want this as the + * mmc_host always has positive disable_depth and setting the flag + * will not speed up the suspend process. + * So return 0. + */ + return 0; +} + +static const struct dev_pm_ops mmc_pm_ops = { + .prepare = mmc_host_prepare, +}; + static struct class mmc_host_class = { .name = "mmc_host", .dev_release = mmc_host_classdev_release, + .pm = &mmc_pm_ops, }; int mmc_register_host_class(void) @@ -742,6 +761,7 @@ static ssize_t store_enable(struct device *dev, /* Suspend the clock scaling and mask host capability */ if (host->clk_scaling.enable) mmc_suspend_clk_scaling(host); + host->clk_scaling.enable = false; host->caps2 &= ~MMC_CAP2_CLK_SCALE; host->clk_scaling.state = MMC_LOAD_HIGH; /* Set to max. frequency when disabling */ @@ -750,8 +770,10 @@ static ssize_t store_enable(struct device *dev, } else if (value) { /* Unmask host capability and resume scaling */ host->caps2 |= MMC_CAP2_CLK_SCALE; - if (!host->clk_scaling.enable) + if (!host->clk_scaling.enable) { + host->clk_scaling.enable = true; mmc_resume_clk_scaling(host); + } } mmc_put_card(host->card); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 695d14c5db47d0556943df1259b02f2f59628617..46eceace3a11fdcfd15531361d1746131f747d05 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2952,12 +2952,6 @@ static int mmc_reset(struct mmc_host *host) struct mmc_card *card = host->card; int ret; - /* - * In the case of recovery, we can't expect flushing the cache to work - * always, but we have a go and ignore errors. 
- */ - mmc_flush_cache(host->card); - if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset && mmc_can_reset(card)) { mmc_host_clk_hold(host); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e2dd4c03b20e430195addfe71daa965b2fd9ff55..804dc190b53c337108cfcb8fe73a2e896eece76c 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -208,6 +208,8 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card) host->max_req_size / 512)); blk_queue_max_segment_size(mq->queue, host->max_seg_size); blk_queue_max_segments(mq->queue, host->max_segs); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); } static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) @@ -464,6 +466,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); + if (host->inlinecrypt_support) + queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, mq->queue); sema_init(&mq->thread_sem, 1); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 09f6823909dfeefcfbf612a1a2c6e8d44931793d..4b1df1c15d9c233e28502f3ab9a47a8c2dfd2dbf 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -438,6 +438,17 @@ config MMC_SDHCI_MSM If unsure, say N. +config MMC_SDHCI_MSM_ICE + bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core" + depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE + help + This selects the QTI specific additions to support Inline Crypto + Engine (ICE). ICE accelerates the crypto operations and maintains + the high SDHCI performance. + + Select this if you have ICE supported for SDHCI on QTI chipset. + If unsure, say N. 
+ config MMC_MXC tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" depends on ARCH_MXC || PPC_MPC512x diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c index daa39c8eacd6d290a47bef20b3d3df5989aed844..317d8c3bfb0e7b56acae83812be91c09a6cec206 100644 --- a/drivers/mmc/host/sdhci-msm-ice.c +++ b/drivers/mmc/host/sdhci-msm-ice.c @@ -244,8 +244,8 @@ int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req, } static -void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, - u32 slot, unsigned int bypass, short key_index) +void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot, + unsigned int bypass, short key_index, u32 cdu_sz) { unsigned int ctrl_info_val = 0; @@ -257,7 +257,7 @@ void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, /* Configure data unit size of transfer request */ ctrl_info_val |= - (SDHCI_MSM_ICE_TR_DATA_UNIT_512_B & + (cdu_sz & MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU) << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU; @@ -311,7 +311,7 @@ void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host, */ /* Configure ICE bypass mode */ crypto_params |= - (!bypass & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE) + ((!bypass) & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE) << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE; /* Configure Crypto Configure Index (CCI) */ crypto_params |= (key_index & @@ -335,8 +335,9 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, struct sdhci_msm_host *msm_host = pltfm_host->priv; int err = 0; short key_index = 0; - sector_t lba = 0; + u64 dun = 0; unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; + u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; struct request *req; if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { @@ -349,8 +350,17 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, if (!mrq) return -EINVAL; req = mrq->req; - if (req) { - lba = req->__sector; + if (req && req->bio) { +#ifdef CONFIG_PFK + if 
(bio_dun(req->bio)) { + dun = bio_dun(req->bio); + cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; + } else { + dun = req->__sector; + } +#else + dun = req->__sector; +#endif err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); if (err) return err; @@ -362,11 +372,12 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq, if (msm_host->ice_hci_support) { /* For ICE HCI / ICE3.0 */ - sdhci_msm_ice_hci_update_noncq_cfg(host, lba, bypass, + sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass, key_index); } else { /* For ICE versions earlier to ICE3.0 */ - sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index); + sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, + cdu_sz); } return 0; } @@ -378,9 +389,10 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, struct sdhci_msm_host *msm_host = pltfm_host->priv; int err = 0; short key_index = 0; - sector_t lba = 0; + u64 dun = 0; unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS; struct request *req; + u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B; if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) { pr_err("%s: ice is in invalid state %d\n", @@ -392,8 +404,17 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, if (!mrq) return -EINVAL; req = mrq->req; - if (req) { - lba = req->__sector; + if (req && req->bio) { +#ifdef CONFIG_PFK + if (bio_dun(req->bio)) { + dun = bio_dun(req->bio); + cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB; + } else { + dun = req->__sector; + } +#else + dun = req->__sector; +#endif err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index); if (err) return err; @@ -405,11 +426,12 @@ int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host, if (msm_host->ice_hci_support) { /* For ICE HCI / ICE3.0 */ - sdhci_msm_ice_hci_update_cmdq_cfg(lba, bypass, key_index, + sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index, ice_ctx); } else { /* For ICE versions earlier to ICE3.0 */ - sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index); + 
sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index, + cdu_sz); } return 0; } diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 2137eb53d059bce85f0e5a8d886a65e463d1f9dd..c2f8edbddf8305eaddf36dacefa90bfb2e74c035 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -4987,6 +4987,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto vreg_deinit; } host->is_crypto_en = true; + msm_host->mmc->inlinecrypt_support = true; /* Packed commands cannot be encrypted/decrypted using ICE */ msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR | MMC_CAP2_PACKED_WR_CONTROL); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 7c0898c7b1c01d8863e7391ba6ee229bc1398280..32febee5c9a4ba20d1698bafced804c9698d71ea 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -157,6 +157,9 @@ void sdhci_dumpregs(struct sdhci_host *host) host->ops->dump_vendor_regs(host); sdhci_dump_state(host); SDHCI_DUMP("============================================\n"); + /* crash the system upon setting this debugfs. 
*/ + if (host->mmc->crash_on_err) + BUG_ON(1); } EXPORT_SYMBOL_GPL(sdhci_dumpregs); diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 3baddfc997d139358ec63ff50f6d66c474e48ad9..b49ca02b399da0f4f2391185eab48081f5f03bd3 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -2544,6 +2544,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc, nand_set_flash_node(chip, dn); mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); + if (!mtd->name) + return -ENOMEM; + mtd->owner = THIS_MODULE; mtd->dev.parent = dev; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 36fddb199160b064f0d2c38363eb7e192437ed2c..f4b3554b0b67bcff2c27318e1ce1c5c1c3fb6032 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -752,7 +752,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); rpfl2multicast_flr_en_set(self, - IS_FILTER_ENABLED(IFF_MULTICAST), 0); + IS_FILTER_ENABLED(IFF_ALLMULTI), 0); rpfl2_accept_all_mc_packets_set(self, IS_FILTER_ENABLED(IFF_ALLMULTI)); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index fb5bed4d159ecbae735a4952ed39c5a16832f881..99883dff3bbb767ce313abc4ac653b5b20635e83 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -221,6 +221,7 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) hlist_del_init_rcu(&ep->hlnode); rmnet_unregister_bridge(dev, port); rmnet_vnd_dellink(mux_id, port, ep); + synchronize_rcu(); kfree(ep); } rmnet_unregister_real_device(real_dev, port); @@ -244,7 +245,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev) port = 
rmnet_get_port_rtnl(dev); - rcu_read_lock(); rmnet_unregister_bridge(dev, port); hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { @@ -252,10 +252,10 @@ static void rmnet_force_unassociate_device(struct net_device *dev) rmnet_vnd_dellink(ep->mux_id, port, ep); hlist_del_init_rcu(&ep->hlnode); + synchronize_rcu(); kfree(ep); } - rcu_read_unlock(); unregister_netdevice_many(&list); qmi_rmnet_qmi_exit(port->qmi_info, port); @@ -587,6 +587,31 @@ void rmnet_clear_powersave_format(void *port) ((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS; } EXPORT_SYMBOL(rmnet_clear_powersave_format); + +void rmnet_enable_all_flows(void *port) +{ + struct rmnet_endpoint *ep; + unsigned long bkt; + + if (unlikely(!port)) + return; + + rcu_read_lock(); + hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep, + bkt, ep, hlnode) { + qmi_rmnet_enable_all_flows(ep->egress_dev); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(rmnet_enable_all_flows); + +int rmnet_get_powersave_notif(void *port) +{ + if (!port) + return 0; + return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF; +} +EXPORT_SYMBOL(rmnet_get_powersave_notif); #endif /* Startup/Shutdown */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 3124e0765f440a227ed6036bb9d7de0772447358..a800e2aa904a64f0328e12513d4670ff4feb19f8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -110,6 +110,26 @@ struct rmnet_priv { void __rcu *qos_info; }; +enum rmnet_dl_marker_prio { + RMNET_PERF, + RMNET_SHS, +}; + +enum rmnet_trace_func { + RMNET_MODULE, + NW_STACK_MODULE, +}; + +enum rmnet_trace_evt { + RMNET_DLVR_SKB, + RMNET_RCV_FROM_PND, + RMNET_TX_UL_PKT, + NW_STACK_DEV_Q_XMIT, + NW_STACK_NAPI_GRO_FLUSH, + NW_STACK_RX, + NW_STACK_TX, +}; + int rmnet_is_real_dev_registered(const struct net_device *real_dev); struct rmnet_port *rmnet_get_port(struct 
net_device *real_dev); struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index dcb02284dc2815b4d034176c9bb6a4ef1330d099..e4ed5bd72a7ca59bd40345678730285510ff0fd3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "rmnet_private.h" #include "rmnet_config.h" #include "rmnet_vnd.h" @@ -27,10 +28,26 @@ #ifdef CONFIG_QCOM_QMI_HELPERS #include #include + #endif #define RMNET_IP_VERSION_4 0x40 #define RMNET_IP_VERSION_6 0x60 +#define CREATE_TRACE_POINTS +#include "rmnet_trace.h" + +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_err); /* Helper Functions */ @@ -80,6 +97,8 @@ rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port); struct rmnet_priv *priv = netdev_priv(skb->dev); + trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF, + 0xDEF, 0xDEF, (void *)skb, NULL); skb_reset_transport_header(skb); skb_reset_network_header(skb); rmnet_vnd_rx_fixup(skb->dev, skb->len); @@ -156,11 +175,8 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, skb->ip_summed = CHECKSUM_UNNECESSARY; } - if ((port->data_format & RMNET_INGRESS_FORMAT_PS) && - !qmi_rmnet_work_get_active(port)) { - /* register for powersave indications*/ - qmi_rmnet_work_restart(port); - } + if 
(port->data_format & RMNET_INGRESS_FORMAT_PS) + qmi_rmnet_work_maybe_restart(port); skb_trim(skb, len); rmnet_deliver_skb(skb, port); @@ -234,15 +250,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, } if (skb_headroom(skb) < required_headroom) { - if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) + if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) return -ENOMEM; } - if ((port->data_format & RMNET_INGRESS_FORMAT_PS) && - !qmi_rmnet_work_get_active(port)) { - /* register for powersave indications*/ - qmi_rmnet_work_restart(port); - } + if (port->data_format & RMNET_INGRESS_FORMAT_PS) + qmi_rmnet_work_maybe_restart(port); if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) rmnet_map_checksum_uplink_packet(skb, orig_dev); @@ -303,6 +316,8 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) if (skb->pkt_type == PACKET_LOOPBACK) return RX_HANDLER_PASS; + trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF, + 0xDEF, 0xDEF, 0xDEF, NULL, NULL); dev = skb->dev; port = rmnet_get_port(dev); @@ -333,6 +348,8 @@ void rmnet_egress_handler(struct sk_buff *skb) int err; u32 skb_len; + trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF, + 0xDEF, (void *)skb, NULL); sk_pacing_shift_update(skb->sk, 8); orig_dev = skb->dev; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 3db9038446307175ba60cd2b4562db1cc3781e8b..4e1aa2d4df3b4f9e864bf414d33db059dcf8cb8c 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -115,6 +115,7 @@ struct rmnet_map_dl_ind_trl { } __aligned(1); struct rmnet_map_dl_ind { + u8 priority; void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); struct list_head list; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 
56457f72a3c675ab328a6a842ded9328f910ea67..8107da7de386e853fda56b8613eb4ed394368dfc 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -270,11 +270,38 @@ void rmnet_map_cmd_init(struct rmnet_port *port) int rmnet_map_dl_ind_register(struct rmnet_port *port, struct rmnet_map_dl_ind *dl_ind) { + struct rmnet_map_dl_ind *dl_ind_iterator; + bool empty_ind_list = true; + if (!port || !dl_ind || !dl_ind->dl_hdr_handler || !dl_ind->dl_trl_handler) return -EINVAL; - list_add_rcu(&dl_ind->list, &port->dl_list); + list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) { + empty_ind_list = false; + if (dl_ind_iterator->priority < dl_ind->priority) { + if (dl_ind_iterator->list.next) { + if (dl_ind->priority + < list_entry_rcu(dl_ind_iterator->list.next, + typeof(*dl_ind_iterator), list)->priority) { + list_add_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } else { + list_add_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } else { + list_add_tail_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } + + if (empty_ind_list) + list_add_rcu(&dl_ind->list, &port->dl_list); return 0; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index a3185ec968c6b7373d3c1792c8fadf6c59311cd0..009954a74f88f7f654dff64049910e8410547643 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -25,6 +25,7 @@ /* Power save feature*/ #define RMNET_INGRESS_FORMAT_PS BIT(27) +#define RMNET_FORMAT_PS_NOTIF BIT(26) /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..02adc618db1e1ccf36cbb4c5f9e6d2409065bec5 --- 
/dev/null +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h @@ -0,0 +1,480 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rmnet +#define TRACE_INCLUDE_FILE rmnet_trace + +#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _RMNET_TRACE_H_ + +#include +#include +#include + +/*****************************************************************************/ +/* Trace events for rmnet module */ +/*****************************************************************************/ +TRACE_EVENT + (rmnet_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void 
*ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +/*****************************************************************************/ +/* Trace events for rmnet_perf module */ +/*****************************************************************************/ +TRACE_EVENT + (rmnet_perf_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( 
+ __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_perf_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_perf_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + 
__entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) +/*****************************************************************************/ +/* Trace events for rmnet_shs module */ +/*****************************************************************************/ +TRACE_EVENT + (rmnet_shs_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_shs_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), 
+ + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_shs_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_shs_wq_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_shs_wq_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 
ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +TRACE_EVENT + (rmnet_shs_wq_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + + TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) +#endif /* _RMNET_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 106c270ea933579561df4630ed4f1c79d9657d1b..fbd22f4343c585c3c8c24d68cf52872f2a291af6 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -166,7 +166,13 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev, void *accel_priv, select_queue_fallback_t fallback) { - return 0; + struct rmnet_priv *priv = netdev_priv(dev); + int txq = 0; + + if (priv->real_dev) + txq = qmi_rmnet_get_queue(dev, skb); + + return (txq < dev->real_num_tx_queues) ? txq : 0; } static const struct net_device_ops rmnet_vnd_ops = { diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3df872f56289a2be0de52e98e641501d51fbf174..37026473cf6dad834165ecc40af0c300fff6b963 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -376,7 +376,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) * because generally mcdi responses are fast. After that, back off * and poll once a jiffy (approximately) */ - spins = TICK_USEC; + spins = USER_TICK_USEC; finish = jiffies + MCDI_RPC_TIMEOUT; while (1) { diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c index 95cc6be54ad5a058be577398c918c9f7511df690..9a3564cc7ca930619dddd397afbe6c25986b5548 100644 --- a/drivers/net/ppp/pppolac.c +++ b/drivers/net/ppp/pppolac.c @@ -83,7 +83,7 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) /* Put it back if it is a control packet. */ if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT) - return opt->backlog_rcv(sk_udp, skb); + return 2; /* Skip UDP header. 
*/ skb_pull(skb, sizeof(struct udphdr)); @@ -190,9 +190,10 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb) { + int retval; sock_hold(sk_udp); - sk_receive_skb(sk_udp, skb, 0); - return 0; + retval = sk_receive_skb(sk_udp, skb, 0); + return (retval >> 1); } static struct sk_buff_head delivery_queue; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 63d8a99e404cc33372ee5c54abe3a483bec13e70..cc802b468f5c7932c472edfbd5df1c38a256fb77 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -25,7 +25,8 @@ #include "fw.h" #define WIL_MAX_ROC_DURATION_MS 5000 -#define CTRY_CHINA "CN" +#define WIL_BRD_SUFFIX_CN "CN" +#define WIL_BRD_SUFFIX_FCC "FCC" bool disable_ap_sme; module_param(disable_ap_sme, bool, 0444); @@ -64,6 +65,25 @@ static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(4, 0), }; +struct wil_regd_2_brd_suffix { + const char regdomain[3]; /* alpha2 */ + const char *brd_suffix; +}; + +static struct wil_regd_2_brd_suffix wil_regd_2_brd_suffix_map[] = { + {"BO", WIL_BRD_SUFFIX_FCC}, + {"CN", WIL_BRD_SUFFIX_CN}, + {"EC", WIL_BRD_SUFFIX_FCC}, + {"GU", WIL_BRD_SUFFIX_FCC}, + {"HN", WIL_BRD_SUFFIX_FCC}, + {"JM", WIL_BRD_SUFFIX_FCC}, + {"MX", WIL_BRD_SUFFIX_FCC}, + {"NI", WIL_BRD_SUFFIX_FCC}, + {"PY", WIL_BRD_SUFFIX_FCC}, + {"TT", WIL_BRD_SUFFIX_FCC}, + {"US", WIL_BRD_SUFFIX_FCC}, +}; + enum wil_nl_60g_cmd_type { NL_60G_CMD_FW_WMI, NL_60G_CMD_DEBUG, @@ -2491,24 +2511,43 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, return 0; } +static void wil_get_brd_reg_suffix(struct wil6210_priv *wil, + const u8 *new_regdomain, + char *brd_reg_suffix, size_t len) +{ + int i; + struct wil_regd_2_brd_suffix *entry; + + for (i = 0; i < ARRAY_SIZE(wil_regd_2_brd_suffix_map); i++) { + entry = &wil_regd_2_brd_suffix_map[i]; + if 
(!memcmp(entry->regdomain, new_regdomain, 2)) { + strlcpy(brd_reg_suffix, entry->brd_suffix, len); + return; + } + } + + /* regdomain not found in our map, set suffix to none */ + brd_reg_suffix[0] = '\0'; +} + static int wil_switch_board_file(struct wil6210_priv *wil, const u8 *new_regdomain) { int rc = 0; + char brd_reg_suffix[WIL_BRD_SUFFIX_LEN]; if (!country_specific_board_file) return 0; - if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) { - wil_info(wil, "moving out of China reg domain, use default board file\n"); - wil->board_file_country[0] = '\0'; - } else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) { - wil_info(wil, "moving into China reg domain, use country specific board file\n"); - strlcpy(wil->board_file_country, CTRY_CHINA, - sizeof(wil->board_file_country)); - } else { + wil_get_brd_reg_suffix(wil, new_regdomain, brd_reg_suffix, + sizeof(brd_reg_suffix)); + if (!strcmp(wil->board_file_reg_suffix, brd_reg_suffix)) return 0; - } + + wil_info(wil, "switch board file suffix '%s' => '%s'\n", + wil->board_file_reg_suffix, brd_reg_suffix); + strlcpy(wil->board_file_reg_suffix, brd_reg_suffix, + sizeof(wil->board_file_reg_suffix)); /* need to switch board file - reset the device */ diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 6c8f8c60e6a0eef34a2143d140d57222e9b721d3..e77366fd8d78061dab8091d3e4f554fedc87b659 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1194,19 +1194,19 @@ void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len) board_file = WIL_BOARD_FILE_NAME; } - if (wil->board_file_country[0] == '\0') { + if (wil->board_file_reg_suffix[0] == '\0') { strlcpy(buf, board_file, len); return; } /* use country specific board file */ - if (len < strlen(board_file) + 4 /* for _XX and terminating null */) + if (len < strlen(board_file) + 1 + WIL_BRD_SUFFIX_LEN) /* 1 for '_' */ return; ext = strrchr(board_file, '.'); prefix_len = (ext 
? ext - board_file : strlen(board_file)); - snprintf(buf, len, "%.*s_%.2s", - prefix_len, board_file, wil->board_file_country); + snprintf(buf, len, "%.*s_%.3s", + prefix_len, board_file, wil->board_file_reg_suffix); if (ext) strlcat(buf, ext, len); } diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 93fcc55bcd22b77514ed7a6e0a0b76b2b6002aca..3a4194779ddf6371da82b934565bbf6c69eb2c27 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -93,7 +93,11 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) if (wmi_only || debug_fw) { wil_dbg_pm(wil, "Deny any suspend - %s mode\n", wmi_only ? "wmi_only" : "debug_fw"); - rc = -EPERM; + rc = -EBUSY; + goto out; + } + if (is_runtime && !wil->platform_ops.suspend) { + rc = -EBUSY; goto out; } diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index f53c356cc78449e8e6fca0efa2d1cda9c3d0abd7..59d0bc752fd07fc53ec6a0388de3318a90860432 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -1150,7 +1150,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, /* Total number of completed descriptors in all descriptor rings */ int desc_cnt = 0; int cid; - struct wil_net_stats *stats = NULL; + struct wil_net_stats *stats; struct wil_tx_enhanced_desc *_d; unsigned int ring_id; unsigned int num_descs; @@ -1200,8 +1200,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, ndev = vif_to_ndev(vif); cid = wil->ring2cid_tid[ring_id][0]; - if (cid < WIL6210_MAX_CID) - stats = &wil->sta[cid].stats; + stats = (cid < WIL6210_MAX_CID ? 
&wil->sta[cid].stats : NULL); wil_dbg_txrx(wil, "tx_status: completed desc_ring (%d), num_descs (%d)\n", diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 48136a1f8d8492bedbca66e1addb9642cbf0d4de..efa8a46a55602733b1b68346cd099ad004505c67 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -69,6 +69,8 @@ union wil_tx_desc; */ #define WIL_MAX_VIFS 4 +#define WIL_BRD_SUFFIX_LEN 4 /* max 3 letters + terminating null */ + /** * extract bits [@b0:@b1] (inclusive) from the value @x * it should be @b0 <= @b1, or result is incorrect @@ -916,7 +918,7 @@ struct wil6210_priv { const char *hw_name; const char *wil_fw_name; char *board_file; - char board_file_country[3]; /* alpha2 */ + char board_file_reg_suffix[WIL_BRD_SUFFIX_LEN]; /* empty or CN or FCC */ u32 brd_file_addr; u32 brd_file_max_size; DECLARE_BITMAP(hw_capa, hw_capa_last); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index c4c7e46f595489ebb58625c51599c8c5316a883e..00ab8a49a7b2854ec3c9fce5eb3f9336b69db0d8 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -247,6 +247,7 @@ struct cnss_plat_data { u32 diag_reg_read_len; u8 *diag_reg_read_buf; bool cal_done; + bool powered_on; char firmware_name[13]; }; diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 05681493f52079a240b0c2bb5c78efd35d8621bd..7d80e5b90b79b86b841c467957dec419980d4a75 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -780,9 +780,8 @@ static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv) int ret = 0; struct device *dev; struct dma_iommu_mapping *mapping; - int atomic_ctx = 1; - int s1_bypass = 1; - int fast = 1; + int atomic_ctx = 1, s1_bypass = 1, fast = 1, cb_stall_disable = 1, + no_cfre = 1; cnss_pr_dbg("Initializing SMMU\n"); @@ -817,6 +816,24 @@ static int cnss_pci_init_smmu(struct 
cnss_pci_data *pci_priv) ret); goto release_mapping; } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_CB_STALL_DISABLE, + &cb_stall_disable); + if (ret) { + pr_err("Failed to set SMMU cb_stall_disable attribute, err = %d\n", + ret); + goto release_mapping; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_NO_CFRE, + &no_cfre); + if (ret) { + pr_err("Failed to set SMMU no_cfre attribute, err = %d\n", + ret); + goto release_mapping; + } } else { ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, @@ -1251,6 +1268,74 @@ int cnss_pm_request_resume(struct cnss_pci_data *pci_priv) return pm_request_resume(&pci_dev->dev); } +int cnss_pci_force_wake_request(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct mhi_controller *mhi_ctrl; + + if (!pci_priv) + return -ENODEV; + + if (pci_priv->device_id != QCA6390_DEVICE_ID) + return 0; + + mhi_ctrl = pci_priv->mhi_ctrl; + if (!mhi_ctrl) + return -EINVAL; + + read_lock_bh(&mhi_ctrl->pm_lock); + mhi_ctrl->wake_get(mhi_ctrl, true); + read_unlock_bh(&mhi_ctrl->pm_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_pci_force_wake_request); + +int cnss_pci_is_device_awake(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct mhi_controller *mhi_ctrl; + + if (!pci_priv) + return -ENODEV; + + if (pci_priv->device_id != QCA6390_DEVICE_ID) + return true; + + mhi_ctrl = pci_priv->mhi_ctrl; + if (!mhi_ctrl) + return -EINVAL; + + return mhi_ctrl->dev_state == MHI_STATE_M0 ? 
true : false; +} +EXPORT_SYMBOL(cnss_pci_is_device_awake); + +int cnss_pci_force_wake_release(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct mhi_controller *mhi_ctrl; + + if (!pci_priv) + return -ENODEV; + + if (pci_priv->device_id != QCA6390_DEVICE_ID) + return 0; + + mhi_ctrl = pci_priv->mhi_ctrl; + if (!mhi_ctrl) + return -EINVAL; + + read_lock_bh(&mhi_ctrl->pm_lock); + mhi_ctrl->wake_put(mhi_ctrl, false); + read_unlock_bh(&mhi_ctrl->pm_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_pci_force_wake_release); + int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; @@ -1864,6 +1949,11 @@ static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl, void *priv, cnss_pr_dbg("MHI status cb is called with reason %d\n", reason); + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + cnss_pr_dbg("Driver unload is in progress, ignore device error\n"); + return; + } + if (pci_priv->driver_ops && pci_priv->driver_ops->update_status) pci_priv->driver_ops->update_status(pci_priv->pci_dev, CNSS_FW_DOWN); diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c index 29122498b5f449efc82cacc9ca71992bef61c744..71cac4e2950d608878ae880fe2c4d3ca42eef7cd 100644 --- a/drivers/net/wireless/cnss2/power.c +++ b/drivers/net/wireless/cnss2/power.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,8 @@ static struct cnss_vreg_info cnss_vreg_info[] = { {NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0}, {NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2}, {NULL, "vdd-wlan", 0, 0, 0, 0}, + {NULL, "vdd-wlan-ctrl1", 0, 0, 0, 0}, + {NULL, "vdd-wlan-ctrl2", 0, 0, 0, 0}, {NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0}, {NULL, "wlan-ant-switch", 2700000, 2700000, 20000, 0}, {NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0}, @@ -344,6 +346,11 @@ int cnss_power_on_device(struct cnss_plat_data *plat_priv) { int ret = 0; + if (plat_priv->powered_on) { + cnss_pr_dbg("Already powered up"); + return 0; + } + ret = cnss_vreg_on(plat_priv); if (ret) { cnss_pr_err("Failed to turn on vreg, err = %d\n", ret); @@ -355,6 +362,7 @@ int cnss_power_on_device(struct cnss_plat_data *plat_priv) cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret); goto vreg_off; } + plat_priv->powered_on = true; return 0; vreg_off: @@ -365,8 +373,14 @@ int cnss_power_on_device(struct cnss_plat_data *plat_priv) void cnss_power_off_device(struct cnss_plat_data *plat_priv) { + if (!plat_priv->powered_on) { + cnss_pr_dbg("Already powered down"); + return; + } + cnss_select_pinctrl_state(plat_priv, false); cnss_vreg_off(plat_priv); + plat_priv->powered_on = false; } void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv) diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index 6a38b66cbb38854e0ba59ed6d6e30c71d4003f71..2551cb667d5d51d821a28e95c3baf01ecd1a9861 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -47,7 +47,7 @@ MODULE_PARM_DESC(qmi_timeout, "Timeout for QMI message in milliseconds"); #define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K -static bool daemon_support; +static bool daemon_support = true; module_param(daemon_support, bool, 0600); MODULE_PARM_DESC(daemon_support, "User space 
has cnss-daemon support or not"); diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c index 49bf8ce742e8019a2dcdaa443e6d51d6b2e8888e..c296aec17253cce27f6b281203c64af489ac3a91 100644 --- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c +++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c @@ -11,9 +11,14 @@ */ #include #include +#include #include #include #include +#include +#ifdef CONFIG_WCNSS_SKB_PRE_ALLOC +#include +#endif #include static DEFINE_SPINLOCK(alloc_lock); @@ -22,6 +27,11 @@ static DEFINE_SPINLOCK(alloc_lock); #define WCNSS_MAX_STACK_TRACE 64 #endif +#define PRE_ALLOC_DEBUGFS_DIR "cnss-prealloc" +#define PRE_ALLOC_DEBUGFS_FILE_OBJ "status" + +static struct dentry *debug_base; + struct wcnss_prealloc { int occupied; size_t size; @@ -94,6 +104,13 @@ static struct wcnss_prealloc wcnss_allocs[] = { {0, 32 * 1024, NULL}, {0, 32 * 1024, NULL}, {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, + {0, 32 * 1024, NULL}, {0, 64 * 1024, NULL}, {0, 64 * 1024, NULL}, {0, 64 * 1024, NULL}, @@ -227,14 +244,89 @@ int wcnss_pre_alloc_reset(void) } EXPORT_SYMBOL(wcnss_pre_alloc_reset); +static int prealloc_memory_stats_show(struct seq_file *fp, void *data) +{ + int i = 0; + int used_slots = 0, free_slots = 0; + unsigned int tsize = 0, tused = 0, size = 0; + + seq_puts(fp, "\nSlot_Size(Kb)\t\t[Used : Free]\n"); + for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) { + tsize += wcnss_allocs[i].size; + if (size != wcnss_allocs[i].size) { + if (size) { + seq_printf( + fp, "[%d : %d]\n", + used_slots, free_slots); + } + + size = wcnss_allocs[i].size; + used_slots = 0; + free_slots = 0; + seq_printf(fp, "%d Kb\t\t\t", size / 1024); + } + + if (wcnss_allocs[i].occupied) { + tused += wcnss_allocs[i].size; + ++used_slots; + } else { + ++free_slots; + } + } + seq_printf(fp, "[%d : %d]\n", used_slots, 
free_slots); + + /* Convert byte to Kb */ + if (tsize) + tsize = tsize / 1024; + if (tused) + tused = tused / 1024; + seq_printf(fp, "\nMemory Status:\nTotal Memory: %dKb\n", tsize); + seq_printf(fp, "Used: %dKb\nFree: %dKb\n", tused, tsize - tused); + + return 0; +} + +static int prealloc_memory_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, prealloc_memory_stats_show, NULL); +} + +static const struct file_operations prealloc_memory_stats_fops = { + .owner = THIS_MODULE, + .open = prealloc_memory_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int __init wcnss_pre_alloc_init(void) { - return wcnss_prealloc_init(); + int ret; + + ret = wcnss_prealloc_init(); + if (ret) { + pr_err("%s: Failed to init the prealloc pool\n", __func__); + return ret; + } + + debug_base = debugfs_create_dir(PRE_ALLOC_DEBUGFS_DIR, NULL); + if (IS_ERR_OR_NULL(debug_base)) { + pr_err("%s: Failed to create debugfs dir\n", __func__); + } else if (IS_ERR_OR_NULL(debugfs_create_file( + PRE_ALLOC_DEBUGFS_FILE_OBJ, + 0644, debug_base, NULL, + &prealloc_memory_stats_fops))) { + pr_err("%s: Failed to create debugfs file\n", __func__); + debugfs_remove_recursive(debug_base); + } + + return ret; } static void __exit wcnss_pre_alloc_exit(void) { wcnss_prealloc_deinit(); + debugfs_remove_recursive(debug_base); } module_init(wcnss_pre_alloc_init); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index dfc076f9ee4b582ada02f5a946fcef18ae91d2ad..d5e790dd589a27bf1d4094d152c791cab6fb7248 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { - struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; @@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, 
RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; - if (shinfo->nr_frags == MAX_SKB_FRAGS) { + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + skb_frag_page(nfrag), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c index 9d5d6758dca988a7cb234342f1f5aa7ddbdddffb..62b42616ec90c963e448b85b6c944efeab6c7820 100644 --- a/drivers/nfc/nq-nci.c +++ b/drivers/nfc/nq-nci.c @@ -676,6 +676,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev) { int ret = 0; + int gpio_retry_count = 0; unsigned char init_rsp_len = 0; unsigned int enable_gpio = nqx_dev->en_gpio; char *nci_reset_cmd = NULL; @@ -707,6 +708,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev) goto done; } +reset_enable_gpio: /* making sure that the NFCC starts in a clean state. 
*/ gpio_set_value(enable_gpio, 0);/* ULPM: Disable */ /* hardware dependent delay */ @@ -734,6 +736,9 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev) if (ret < 0) { dev_err(&client->dev, "%s: - i2c_master_recv Error\n", __func__); + gpio_retry_count = gpio_retry_count + 1; + if (gpio_retry_count < MAX_RETRY_COUNT) + goto reset_enable_gpio; goto err_nfcc_hw_check; } nci_init_cmd[0] = 0x20; diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 4523d7e1bcb9c7d66528105306bf3b688c8a1e05..ffc87a956d97460e9e1bca6e3b02c78c65f65b0a 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -53,6 +53,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 59cec9e6c8a569772c97a500a7c9ee7e787ff100..ab781e1bf480c79cb4d402fc3af3db0799438604 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -5896,7 +5896,7 @@ static int msm_pcie_probe(struct platform_device *pdev) msm_pcie_dev[rc_idx].l1ss_supported; msm_pcie_dev[rc_idx].l1_1_pcipm_supported = msm_pcie_dev[rc_idx].l1ss_supported; - msm_pcie_dev[rc_idx].l1_1_pcipm_supported = + msm_pcie_dev[rc_idx].l1_2_pcipm_supported = msm_pcie_dev[rc_idx].l1ss_supported; msm_pcie_dev[rc_idx].common_clk_en = diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 721a2a1c97ef43299c3e143744297764943b4b92..a63bba12aee42a01c8478d9647fe2873abc97076 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -438,9 +438,9 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy, u32 index = instance->index; u32 tmp; - /* switch to USB function. 
(system register, force ip into usb mode) */ + /* switch to USB function, and enable usb pll */ tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~P2C_FORCE_UART_EN; + tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM); tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0); writel(tmp, com + U3P_U2PHYDTM0); @@ -500,10 +500,8 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy, u32 index = instance->index; u32 tmp; - /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */ tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL); - tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK); + tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK); writel(tmp, com + U3P_U2PHYDTM0); /* OTG Enable */ @@ -538,7 +536,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, tmp = readl(com + U3P_U2PHYDTM0); tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN); - tmp |= P2C_FORCE_SUSPENDM; writel(tmp, com + U3P_U2PHYDTM0); /* OTG Disable */ @@ -546,18 +543,16 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN; writel(tmp, com + U3P_USBPHYACR6); - /* let suspendm=0, set utmi into analog power down */ - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~P2C_RG_SUSPENDM; - writel(tmp, com + U3P_U2PHYDTM0); - udelay(1); - tmp = readl(com + U3P_U2PHYDTM1); tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID); tmp |= P2C_RG_SESSEND; writel(tmp, com + U3P_U2PHYDTM1); if (tphy->pdata->avoid_rx_sen_degradation && index) { + tmp = readl(com + U3P_U2PHYDTM0); + tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); + writel(tmp, com + U3P_U2PHYDTM0); + tmp = readl(com + U3D_U2PHYDCR0); tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; writel(tmp, com + U3D_U2PHYDCR0); diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c index 885592078a853a4a0e2d1e6ebc07653b4b1fd70c..78eab289efb88215b686fd2a2ec69b7db37f360c 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c +++ 
b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.c @@ -35,9 +35,19 @@ int ufs_qcom_phy_qmp_v4_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, */ ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A, ARRAY_SIZE(phy_cal_table_rate_A)); - if (ufs_qcom_phy->lanes_per_direction == 2) + if ((major == 0x4) && (minor == 0x001) && (step == 0x0000)) + ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_v2, + ARRAY_SIZE(phy_cal_table_rate_A_v2)); + + if (ufs_qcom_phy->lanes_per_direction == 2) { ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_2nd_lane, ARRAY_SIZE(phy_cal_table_2nd_lane)); + if ((major == 0x4) && (minor == 0x001) && (step == 0x0000)) + ufs_qcom_phy_write_tbl(ufs_qcom_phy, + phy_cal_table_2nd_lane_v2, + ARRAY_SIZE(phy_cal_table_2nd_lane_v2)); + } + if (is_rate_B) ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B, ARRAY_SIZE(phy_cal_table_rate_B)); diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h index b998d776b336e83a8c82afd83a222cc24aef384f..24cd78889aade6a1dd1a006f3688de625c465373 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h +++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h @@ -263,6 +263,16 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_GAIN, 0x04), }; +static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_v2[] = { + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x35), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x5A), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0E), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x6D), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x6D), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0xED), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x3C), +}; + static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06), 
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03), @@ -307,6 +317,16 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_GAIN, 0x04), }; +static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_v2[] = { + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x35), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x5A), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0E), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x6D), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x6D), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0xED), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x3C), +}; + static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x06), }; diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 82473d18075e15e354cbdddcdcf3866063c53ed9..bf52590588e56501cce6e2e8ce50810587ef4f06 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -189,6 +189,15 @@ config PINCTRL_SM6150 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SM6150 platform. +config PINCTRL_TRINKET + tristate "Qualcomm Technologies Inc TRINKET pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc TLMM block found on the Qualcomm + Technologies Inc TRINKET platform. 
+ config PINCTRL_SDXPRAIRIE tristate "Qualcomm Technologies Inc SDXPRAIRIE pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index fe98640c1fbccb9b99a4e7b1d09abe478e11f775..0156e5619b9e2ea9100293635960e30190aeb588 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o obj-$(CONFIG_PINCTRL_SDMSHRIKE) += pinctrl-sdmshrike.o obj-$(CONFIG_PINCTRL_SM6150) += pinctrl-sm6150.o +obj-$(CONFIG_PINCTRL_TRINKET) += pinctrl-trinket.o obj-$(CONFIG_PINCTRL_SDXPRAIRIE) += pinctrl-sdxprairie.o obj-$(CONFIG_PINCTRL_SDMMAGPIE) += pinctrl-sdmmagpie.o obj-$(CONFIG_PINCTRL_SLPI) += pinctrl-slpi.o diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index fef0970abaf24687ee60b252d61176ca14f38295..19b37b7cc11496a20b242827eb8179a2f74c8f43 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -71,6 +71,7 @@ struct msm_pinctrl { const struct msm_pinctrl_soc_data *soc; void __iomem *regs; + void __iomem *pdc_regs; #ifdef CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE /* For holding per tile virtual address */ void __iomem *per_tile_regs[4]; @@ -975,6 +976,158 @@ static const struct irq_domain_ops msm_gpio_domain_ops = { .free = irq_domain_free_irqs_top, }; +static struct irq_chip msm_dirconn_irq_chip; + +static void msm_gpio_dirconn_handler(struct irq_desc *desc) +{ + struct irq_data *irqd = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + generic_handle_irq(irqd->irq); + chained_irq_exit(chip, desc); +} + +static void setup_pdc_gpio(struct irq_domain *domain, + unsigned int parent_irq, unsigned int gpio) +{ + int irq; + + if (gpio != 0) { + irq = irq_create_mapping(domain, gpio); + irq_set_parent(irq, parent_irq); + irq_set_chip(irq, 
&msm_dirconn_irq_chip); + irq_set_handler_data(parent_irq, irq_get_irq_data(irq)); + } + + __irq_set_handler(parent_irq, msm_gpio_dirconn_handler, false, NULL); +} + +static void request_dc_interrupt(struct irq_domain *domain, + struct irq_domain *parent, irq_hw_number_t hwirq, + unsigned int gpio) +{ + struct irq_fwspec fwspec; + unsigned int parent_irq; + + fwspec.fwnode = parent->fwnode; + fwspec.param[0] = 0; /* SPI */ + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_NONE; + fwspec.param_count = 3; + + parent_irq = irq_create_fwspec_mapping(&fwspec); + + setup_pdc_gpio(domain, parent_irq, gpio); +} + +/** + * gpio_muxed_to_pdc: Mux the GPIO to a PDC IRQ + * + * @pdc_domain: the PDC's domain + * @d: the GPIO's IRQ data + * + * Find a free PDC port for the GPIO and map the GPIO's mux information to the + * PDC registers; so the GPIO can be used a wakeup source. + */ +static void gpio_muxed_to_pdc(struct irq_domain *pdc_domain, struct irq_data *d) +{ + int i, j; + unsigned int mux; + struct irq_desc *desc = irq_data_to_desc(d); + struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + unsigned int gpio = d->hwirq; + struct msm_pinctrl *pctrl; + unsigned int irq; + + if (!gc || !parent_data) + return; + + pctrl = gpiochip_get_data(gc); + + for (i = 0; i < pctrl->soc->n_gpio_mux_in; i++) { + if (gpio != pctrl->soc->gpio_mux_in[i].gpio) + continue; + mux = pctrl->soc->gpio_mux_in[i].mux; + for (j = 0; j < pctrl->soc->n_pdc_mux_out; j++) { + struct msm_pdc_mux_output *pdc_out = + &pctrl->soc->pdc_mux_out[j]; + + if (pdc_out->mux == mux) + break; + if (pdc_out->mux) + continue; + pdc_out->mux = gpio; + irq = irq_find_mapping(pdc_domain, pdc_out->hwirq + 32); + /* setup the IRQ parent for the GPIO */ + setup_pdc_gpio(pctrl->chip.irqdomain, irq, gpio); + /* program pdc select grp register */ + writel_relaxed((mux & 0x3F), pctrl->pdc_regs + + (0x14 * j)); + break; + } + /* We have no more PDC port 
available */ + WARN_ON(j == pctrl->soc->n_pdc_mux_out); + } +} + +static bool is_gpio_tlmm_dc(struct irq_data *d, u32 type) +{ + const struct msm_pingroup *g; + unsigned long flags; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl; + bool ret = false; + unsigned int polarity = 0, offset, val; + int i; + void __iomem *base; + + if (!gc) + return false; + + pctrl = gpiochip_get_data(gc); + + for (i = 0; i < pctrl->soc->n_dir_conns; i++) { + struct msm_dir_conn *dir_conn = (struct msm_dir_conn *) + &pctrl->soc->dir_conn[i]; + + if (dir_conn->gpio == d->hwirq && dir_conn->tlmm_dc) { + ret = true; + offset = pctrl->soc->dir_conn_irq_base - + dir_conn->hwirq; + break; + } + } + + if (!ret) + return ret; + + if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) + return ret; + + /* + * Since the default polarity is set to 0, change it to 1 for + * Rising edge and active high interrupt type such that the line + * is not inverted. + */ + polarity = 1; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + g = &pctrl->soc->groups[d->hwirq]; + base = reassign_pctrl_reg(pctrl->soc, d->hwirq); + + val = readl_relaxed(base + g->dir_conn_reg + (offset * 4)); + val |= polarity << 8; + + writel_relaxed(val, base + g->dir_conn_reg + (offset * 4)); + + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return ret; +} + static bool is_gpio_dual_edge(struct irq_data *d, irq_hw_number_t *dir_conn_irq) { struct irq_desc *desc = irq_data_to_desc(d); @@ -995,6 +1148,17 @@ static bool is_gpio_dual_edge(struct irq_data *d, irq_hw_number_t *dir_conn_irq) return true; } } + + for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) { + struct msm_pdc_mux_output *dir_conn = + &pctrl->soc->pdc_mux_out[i]; + + if (dir_conn->mux == d->hwirq && (dir_conn->hwirq + 32) + != parent_data->hwirq) { + *dir_conn_irq = dir_conn->hwirq + 32; + return true; + } + } return false; } @@ -1012,9 +1176,13 @@ static void msm_dirconn_irq_mask(struct irq_data *d) 
irq_get_irq_data(irq_find_mapping(parent_data->domain, dir_conn_irq)); - if (dir_conn_data && dir_conn_data->chip->irq_mask) + if (!dir_conn_data) + return; + + if (dir_conn_data->chip->irq_mask) dir_conn_data->chip->irq_mask(dir_conn_data); } + if (parent_data->chip->irq_mask) parent_data->chip->irq_mask(parent_data); } @@ -1065,7 +1233,10 @@ static void msm_dirconn_irq_unmask(struct irq_data *d) irq_get_irq_data(irq_find_mapping(parent_data->domain, dir_conn_irq)); - if (dir_conn_data && dir_conn_data->chip->irq_unmask) + if (!dir_conn_data) + return; + + if (dir_conn_data->chip->irq_unmask) dir_conn_data->chip->irq_unmask(dir_conn_data); } if (parent_data->chip->irq_unmask) @@ -1264,12 +1435,12 @@ static int msm_dirconn_irq_set_type(struct irq_data *d, unsigned int type) if (!parent_data) return 0; - if (type == IRQ_TYPE_EDGE_BOTH) { + if (type == IRQ_TYPE_EDGE_BOTH) add_dirconn_tlmm(d, irq); - } else { - if (is_gpio_dual_edge(d, &irq)) - remove_dirconn_tlmm(d, irq); - } + else if (is_gpio_dual_edge(d, &irq)) + remove_dirconn_tlmm(d, irq); + else if (is_gpio_tlmm_dc(d, type)) + type = IRQ_TYPE_EDGE_RISING; if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); @@ -1333,57 +1504,72 @@ static void msm_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void msm_gpio_dirconn_handler(struct irq_desc *desc) -{ - struct irq_data *irqd = irq_desc_get_handler_data(desc); - struct irq_chip *chip = irq_desc_get_chip(desc); - - chained_irq_enter(chip, desc); - generic_handle_irq(irqd->irq); - chained_irq_exit(chip, desc); -} - static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl) { struct device_node *parent_node; - struct irq_domain *parent_domain; - struct irq_fwspec fwspec; + struct irq_domain *pdc_domain; unsigned int i; parent_node = of_irq_find_parent(pctrl->dev->of_node); - if (!parent_node) return; - parent_domain = irq_find_host(parent_node); - if (!parent_domain) + pdc_domain = 
irq_find_host(parent_node); + if (!pdc_domain) return; - fwspec.fwnode = parent_domain->fwnode; for (i = 0; i < pctrl->soc->n_dir_conns; i++) { const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i]; - unsigned int parent_irq; - int irq; - - fwspec.param[0] = 0; /* SPI */ - fwspec.param[1] = dirconn->hwirq; - fwspec.param[2] = IRQ_TYPE_NONE; - fwspec.param_count = 3; - parent_irq = irq_create_fwspec_mapping(&fwspec); - - if (dirconn->gpio != 0) { - irq = irq_create_mapping(pctrl->chip.irqdomain, - dirconn->gpio); - - irq_set_parent(irq, parent_irq); - irq_set_chip(irq, &msm_dirconn_irq_chip); - __irq_set_handler(parent_irq, msm_gpio_dirconn_handler, - false, NULL); - irq_set_handler_data(parent_irq, irq_get_irq_data(irq)); - } else { - __irq_set_handler(parent_irq, msm_gpio_dirconn_handler, - false, NULL); - } + struct irq_data *d; + + request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain, + dirconn->hwirq, dirconn->gpio); + + if (!dirconn->gpio) + continue; + + if (!dirconn->tlmm_dc) + continue; + + /* + * If the gpio is routed through TLMM direct connect interrupts, + * program the TLMM registers for this setup. + */ + d = irq_get_irq_data(irq_find_mapping(pctrl->chip.irqdomain, + dirconn->gpio)); + if (!d) + continue; + + msm_dirconn_cfg_reg(d, pctrl->soc->dir_conn_irq_base + - (u32)dirconn->hwirq); + } + + for (i = 0; i < pctrl->soc->n_pdc_mux_out; i++) { + struct msm_pdc_mux_output *pdc_out = + &pctrl->soc->pdc_mux_out[i]; + + request_dc_interrupt(pctrl->chip.irqdomain, pdc_domain, + pdc_out->hwirq, 0); + } + + /* + * Statically choose the GPIOs for mapping to PDC. Dynamic mux mapping + * is very difficult. 
+ */ + for (i = 0; i < pctrl->soc->n_gpio_mux_in; i++) { + unsigned int irq; + struct irq_data *d; + struct msm_gpio_mux_input *gpio_in = + &pctrl->soc->gpio_mux_in[i]; + if (!gpio_in->init) + continue; + + irq = irq_find_mapping(pctrl->chip.irqdomain, gpio_in->gpio); + d = irq_get_irq_data(irq); + if (!d) + continue; + + gpio_muxed_to_pdc(pdc_domain, d); } } @@ -1554,6 +1740,9 @@ int msm_pinctrl_probe(struct platform_device *pdev, } #endif + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pctrl->pdc_regs = devm_ioremap_resource(&pdev->dev, res); + msm_pinctrl_setup_pm_reset(pctrl); pctrl->irq = platform_get_irq(pdev, 0); diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 7050dc2cf0934d988ee0470d0e860a6b7789b739..6251b4d429a8f5619a9c7bc1b309641a29927500 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, Sony Mobile Communications AB. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -104,16 +105,40 @@ struct msm_pingroup { unsigned intr_detection_bit:5; unsigned intr_detection_width:5; unsigned dir_conn_en_bit:8; -} +}; + +/** + * struct msm_gpio_mux_input - Map GPIO to Mux pin + * @mux: The mux pin to which the GPIO is connected + * @gpio: GPIO pin number + * @init: Setup PDC connection at probe + */ +struct msm_gpio_mux_input { + unsigned int mux; + unsigned int gpio; + bool init; +}; + +/** + * struct msm_pdc_mux_output - GPIO mux pin to PDC port + * @mux: GPIO mux pin number + * @hwirq: The PDC port (hwirq) that GPIO is connected to + */ +struct msm_pdc_mux_output { + unsigned int mux; + irq_hw_number_t hwirq; +}; /** * struct msm_dir_conn - Direct GPIO connect configuration * @gpio: GPIO pin number * @hwirq: The GIC interrupt that the pin is connected to - */; + * @tlmm_dc: indicates if the GPIO is routed to GIC directly + */ struct msm_dir_conn { unsigned int gpio; irq_hw_number_t hwirq; + bool tlmm_dc; }; /** @@ -129,6 +154,11 @@ struct msm_dir_conn { * @dir_conn: An array describing all the pins directly connected to GIC. * @ndirconns: The number of pins directly connected to GIC * @dir_conn_irq_base: Direct connect interrupt base register for kpss. + * @gpio_mux_in: Map of GPIO pin to the mux pin. + * @n_gpio_mux_in: The number of entries in @gpio_mux_in. + * @pdc_mux_out: Map of GPIO mux to PDC port. + * @n_pdc_mux_out: The number of entries in @pdc_mux_out.
+ * @n_pdc_offset: The offset for the PDC mux pins */ struct msm_pinctrl_soc_data { const struct pinctrl_pin_desc *pins; @@ -142,6 +172,11 @@ struct msm_pinctrl_soc_data { const struct msm_dir_conn *dir_conn; unsigned int n_dir_conns; unsigned int dir_conn_irq_base; + struct msm_pdc_mux_output *pdc_mux_out; + unsigned int n_pdc_mux_out; + struct msm_gpio_mux_input *gpio_mux_in; + unsigned int n_gpio_mux_in; + unsigned int n_pdc_mux_offset; #ifdef CONFIG_FRAGMENTED_GPIO_ADDRESS_SPACE const u32 *tile_start; const u32 *tile_offsets; diff --git a/drivers/pinctrl/qcom/pinctrl-qcs405.c b/drivers/pinctrl/qcom/pinctrl-qcs405.c index 9299ff14956435700cfc73da4a8249944b2e5e81..833ed4c8398cca8ac1ba442115f0bed2b47ac295 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs405.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs405.c @@ -422,7 +422,7 @@ static const unsigned int sdc2_data_pins[] = { 126 }; enum qcs405_functions { msm_mux_gpio, - msm_mux_hdmi_tx, + msm_mux_hdmi_cec, msm_mux_hdmi_ddc, msm_mux_blsp_uart_tx_a2, msm_mux_blsp_spi2, @@ -477,6 +477,7 @@ enum qcs405_functions { msm_mux_pwm_led11, msm_mux_i2s_3_data0_a, msm_mux_ebi2_lcd, + msm_mux_hdmi_hot, msm_mux_i2s_3_data1_a, msm_mux_i2s_3_data2_a, msm_mux_atest_char, @@ -631,7 +632,7 @@ static const char * const gpio_groups[] = { "gpio113", "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119", }; -static const char * const hdmi_tx_groups[] = { +static const char * const hdmi_cec_groups[] = { "gpio14", }; static const char * const hdmi_ddc_groups[] = { @@ -794,6 +795,9 @@ static const char * const blsp_i2c_scl_b2_groups[] = { static const char * const pwm_led11_groups[] = { "gpio43", }; +static const char * const hdmi_hot_groups[] = { + "gpio106", +}; static const char * const i2s_3_data0_a_groups[] = { "gpio106", }; @@ -1187,7 +1191,7 @@ static const char * const i2s_3_ws_a_groups[] = { static const struct msm_function qcs405_functions[] = { FUNCTION(gpio), - FUNCTION(hdmi_tx), + FUNCTION(hdmi_cec), FUNCTION(hdmi_ddc), 
FUNCTION(blsp_uart_tx_a2), FUNCTION(blsp_spi2), @@ -1240,6 +1244,7 @@ static const struct msm_function qcs405_functions[] = { FUNCTION(blsp_i2c_sda_b2), FUNCTION(blsp_i2c_scl_b2), FUNCTION(pwm_led11), + FUNCTION(hdmi_hot), FUNCTION(i2s_3_data0_a), FUNCTION(ebi2_lcd), FUNCTION(i2s_3_data1_a), @@ -1391,7 +1396,7 @@ static const struct msm_pingroup qcs405_groups[] = { [11] = PINGROUP(11, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), [12] = PINGROUP(12, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), [13] = PINGROUP(13, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), - [14] = PINGROUP(14, SOUTH, hdmi_tx, NA, NA, NA, NA, NA, NA, NA, NA), + [14] = PINGROUP(14, SOUTH, hdmi_cec, NA, NA, NA, NA, NA, NA, NA, NA), [15] = PINGROUP(15, SOUTH, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA), [16] = PINGROUP(16, SOUTH, hdmi_ddc, NA, NA, NA, NA, NA, NA, NA, NA), [17] = PINGROUP(17, NORTH, blsp_uart_tx_a2, blsp_spi2, m_voc, NA, NA, @@ -1554,8 +1559,8 @@ static const struct msm_pingroup qcs405_groups[] = { [104] = PINGROUP(104, EAST, i2s_3_sck_a, NA, NA, NA, NA, NA, NA, NA, NA), [105] = PINGROUP(105, EAST, i2s_3_ws_a, NA, NA, NA, NA, NA, NA, NA, NA), - [106] = PINGROUP(106, EAST, i2s_3_data0_a, ebi2_lcd, NA, NA, ebi_cdc, - NA, NA, NA, NA), + [106] = PINGROUP(106, EAST, i2s_3_data0_a, ebi2_lcd, hdmi_hot, NA, + ebi_cdc, NA, NA, NA, NA), [107] = PINGROUP(107, EAST, i2s_3_data1_a, ebi2_lcd, NA, NA, ebi_cdc, NA, NA, NA, NA), [108] = PINGROUP(108, EAST, i2s_3_data2_a, ebi2_lcd, atest_char, diff --git a/drivers/pinctrl/qcom/pinctrl-sm6150.c b/drivers/pinctrl/qcom/pinctrl-sm6150.c index 39448b33c79fb83a1a9c581235c5cafcdc716657..601384ee2174a49e35667efd82d00e0eb391cdb1 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm6150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm6150.c @@ -1570,14 +1570,14 @@ static struct msm_dir_conn sm6150_dir_conn[] = { {19, 528}, {21, 563}, {22, 516}, - {35, 517}, - {39, 633}, {26, 518}, + {35, 517}, + {39, 633}, /* GPIO 39 mapped to SPI 640 as well */ {41, 527}, {47, 529}, {48, 531}, {50, 
532}, - {51, 631}, + {51, 631}, /* GPIO 51 mapped to SPI 638 as well */ {55, 536}, {56, 537}, {57, 538}, @@ -1591,8 +1591,8 @@ static struct msm_dir_conn sm6150_dir_conn[] = { {85, 614}, {86, 547}, {87, 564}, - {88, 632}, - {89, 630}, + {88, 632}, /* GPIO 88 mapped to SPI 639 as well */ + {89, 630}, /* GPIO 89 mapped to SPI 637 as well */ {90, 549}, {92, 568}, {93, 555}, @@ -1600,8 +1600,8 @@ static struct msm_dir_conn sm6150_dir_conn[] = { {95, 552}, {96, 562}, {97, 554}, - {98, 609}, - {99, 610}, + {98, 610}, + {99, 609}, {100, 615}, {101, 520}, {102, 573}, diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c index 1b2b6b41298330a0a6ec5963b4bdcf187569a320..f0442dcb4eb723f119e2b9f61576da632757c9d5 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c @@ -492,6 +492,7 @@ static const unsigned int ufs_reset_pins[] = { 178 }; enum sm8150_functions { msm_mux_phase_flag8, msm_mux_phase_flag7, + msm_mux_emac_pps, msm_mux_qup12, msm_mux_qup16, msm_mux_tsif1_clk, @@ -706,6 +707,9 @@ static const char * const phase_flag8_groups[] = { static const char * const phase_flag7_groups[] = { "gpio80", }; +static const char * const emac_pps_groups[] = { + "gpio81", +}; static const char * const qup12_groups[] = { "gpio83", "gpio84", "gpio85", "gpio86", }; @@ -1355,6 +1359,7 @@ static const char * const phase_flag24_groups[] = { static const struct msm_function sm8150_functions[] = { FUNCTION(phase_flag8), FUNCTION(phase_flag7), + FUNCTION(emac_pps), FUNCTION(qup12), FUNCTION(qup16), FUNCTION(tsif1_clk), @@ -1693,7 +1698,7 @@ static const struct msm_pingroup sm8150_groups[] = { [79] = PINGROUP(79, SOUTH, NA, NA, phase_flag8, NA, NA, NA, NA, NA, NA), [80] = PINGROUP(80, SOUTH, NA, NA, phase_flag7, NA, NA, NA, NA, NA, NA), [81] = PINGROUP(81, SOUTH, NA, NA, NA, nav_pps, nav_pps, qup_l4, - mdp_vsync, NA, NA), + mdp_vsync, emac_pps, NA), [82] = PINGROUP(82, SOUTH, NA, NA, NA, nav_pps, nav_pps, qup_l5, 
mdp_vsync, NA, NA), [83] = PINGROUP(83, NORTH, qup12, qup16, NA, qdss_gpio2, NA, NA, NA, diff --git a/drivers/pinctrl/qcom/pinctrl-trinket.c b/drivers/pinctrl/qcom/pinctrl-trinket.c new file mode 100644 index 0000000000000000000000000000000000000000..5ffd4af33c8087ffb032d14b2f4abd3b51fb1efd --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-trinket.c @@ -0,0 +1,1584 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define NORTH +#define SOUTH 0x00500000 +#define WEST 0x00100000 +#define EAST 0x00900000 +#define DUMMY 0x0 +#define REG_SIZE 0x1000 +#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = base + REG_SIZE * id, \ + .io_reg = base + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \ + .intr_status_reg = base + 0xc + REG_SIZE * id, \ + .intr_target_reg = base + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + 
.in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc trinket_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, 
"GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + 
PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "SDC1_RCLK"), + PINCTRL_PIN(134, "SDC1_CLK"), + PINCTRL_PIN(135, "SDC1_CMD"), + PINCTRL_PIN(136, "SDC1_DATA"), + PINCTRL_PIN(137, "SDC2_CLK"), + PINCTRL_PIN(138, "SDC2_CMD"), + PINCTRL_PIN(139, "SDC2_DATA"), + PINCTRL_PIN(140, "UFS_RESET"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); 
+DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); 
+DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); + +static const unsigned int sdc1_rclk_pins[] = { 133 }; +static const unsigned int sdc1_clk_pins[] = { 134 }; +static const unsigned int sdc1_cmd_pins[] = { 135 }; +static const unsigned int sdc1_data_pins[] = { 136 }; +static const unsigned int sdc2_clk_pins[] = { 137 }; +static 
const unsigned int sdc2_cmd_pins[] = { 138 }; +static const unsigned int sdc2_data_pins[] = { 139 }; +static const unsigned int ufs_reset_pins[] = { 140 }; + +enum trinket_functions { + msm_mux_qup00, + msm_mux_gpio, + msm_mux_qdss_gpio6, + msm_mux_qdss_gpio7, + msm_mux_qdss_gpio8, + msm_mux_qdss_gpio9, + msm_mux_qup01, + msm_mux_qup02, + msm_mux_ddr_pxi0, + msm_mux_ddr_bist, + msm_mux_atest_tsens2, + msm_mux_vsense_trigger, + msm_mux_atest_usb1, + msm_mux_GP_PDM1, + msm_mux_phase_flag23, + msm_mux_dbg_out, + msm_mux_phase_flag28, + msm_mux_qup14, + msm_mux_atest_usb11, + msm_mux_ddr_pxi2, + msm_mux_atest_usb10, + msm_mux_JITTER_BIST, + msm_mux_ddr_pxi3, + msm_mux_pll_bypassnl, + msm_mux_PLL_BIST, + msm_mux_qup03, + msm_mux_pll_reset, + msm_mux_AGERA_PLL, + msm_mux_qdss_cti, + msm_mux_qup04, + msm_mux_wlan2_adc1, + msm_mux_wlan2_adc0, + msm_mux_WSA_CLK, + msm_mux_qup13, + msm_mux_ter_mi2s, + msm_mux_WSA_DATA, + msm_mux_qdss_gpio4, + msm_mux_qdss_gpio5, + msm_mux_qup10, + msm_mux_gcc_gp3, + msm_mux_phase_flag0, + msm_mux_phase_flag3, + msm_mux_phase_flag2, + msm_mux_phase_flag1, + msm_mux_qup12, + msm_mux_phase_flag15, + msm_mux_sd_write, + msm_mux_phase_flag29, + msm_mux_qup11, + msm_mux_phase_flag10, + msm_mux_cam_mclk, + msm_mux_atest_tsens, + msm_mux_cci_i2c, + msm_mux_qdss_gpio1, + msm_mux_cci_timer2, + msm_mux_cci_timer1, + msm_mux_gcc_gp2, + msm_mux_qdss_gpio2, + msm_mux_cci_async, + msm_mux_cci_timer4, + msm_mux_qdss_gpio12, + msm_mux_cci_timer0, + msm_mux_gcc_gp1, + msm_mux_qdss_gpio13, + msm_mux_cci_timer3, + msm_mux_qdss_gpio14, + msm_mux_qdss_gpio15, + msm_mux_wlan1_adc1, + msm_mux_qdss_gpio3, + msm_mux_wlan1_adc0, + msm_mux_qlink_request, + msm_mux_qlink_enable, + msm_mux_pa_indicator, + msm_mux_NAV_PPS, + msm_mux_GPS_TX, + msm_mux_GP_PDM0, + msm_mux_phase_flag22, + msm_mux_atest_usb13, + msm_mux_ddr_pxi1, + msm_mux_phase_flag4, + msm_mux_atest_usb12, + msm_mux_phase_flag9, + msm_mux_phase_flag8, + msm_mux_phase_flag7, + msm_mux_phase_flag27, + 
msm_mux_CRI_TRNG0, + msm_mux_phase_flag26, + msm_mux_CRI_TRNG, + msm_mux_phase_flag25, + msm_mux_CRI_TRNG1, + msm_mux_phase_flag6, + msm_mux_GP_PDM2, + msm_mux_phase_flag5, + msm_mux_SP_CMU, + msm_mux_atest_usb2, + msm_mux_atest_usb23, + msm_mux_uim2_data, + msm_mux_uim2_clk, + msm_mux_uim2_reset, + msm_mux_atest_usb22, + msm_mux_uim2_present, + msm_mux_atest_usb21, + msm_mux_uim1_data, + msm_mux_atest_usb20, + msm_mux_uim1_clk, + msm_mux_uim1_reset, + msm_mux_uim1_present, + msm_mux_mdp_vsync, + msm_mux_phase_flag17, + msm_mux_qdss_gpio0, + msm_mux_phase_flag13, + msm_mux_qdss_gpio, + msm_mux_phase_flag16, + msm_mux_phase_flag12, + msm_mux_phase_flag18, + msm_mux_qdss_gpio10, + msm_mux_copy_gp, + msm_mux_qdss_gpio11, + msm_mux_tsense_pwm, + msm_mux_mpm_pwr, + msm_mux_tgu_ch3, + msm_mux_phase_flag31, + msm_mux_mdp_vsync0, + msm_mux_mdp_vsync1, + msm_mux_mdp_vsync2, + msm_mux_mdp_vsync3, + msm_mux_mdp_vsync4, + msm_mux_mdp_vsync5, + msm_mux_tgu_ch0, + msm_mux_phase_flag11, + msm_mux_tgu_ch1, + msm_mux_atest_char1, + msm_mux_vfr_1, + msm_mux_tgu_ch2, + msm_mux_phase_flag30, + msm_mux_atest_char0, + msm_mux_phase_flag24, + msm_mux_atest_char2, + msm_mux_atest_char3, + msm_mux_ldo_en, + msm_mux_ldo_update, + msm_mux_phase_flag19, + msm_mux_prng_rosc, + msm_mux_dp_hot, + msm_mux_debug_hot, + msm_mux_COPY_PHASE, + msm_mux_usb_phy, + msm_mux_atest_char, + msm_mux_mss_lte, + msm_mux_swr_tx, + msm_mux_aud_sb, + msm_mux_qua_mi2s, + msm_mux_swr_rx, + msm_mux_edp_hot, + msm_mux_audio_ref, + msm_mux_pri_mi2s, + msm_mux_pri_mi2s_ws, + msm_mux_adsp_ext, + msm_mux_edp_lcd, + msm_mux_mclk2, + msm_mux_m_voc, + msm_mux_mclk1, + msm_mux_qca_sb, + msm_mux_qui_mi2s, + msm_mux_DMIC0_CLK, + msm_mux_sec_mi2s, + msm_mux_DMIC0_DATA, + msm_mux_DMIC1_CLK, + msm_mux_DMIC1_DATA, + msm_mux_phase_flag14, + msm_mux_phase_flag21, + msm_mux_phase_flag20, + msm_mux_NA, +}; + +static const char * const qup00_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; +static const char * const gpio_groups[] 
= { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", +}; +static const char * const qdss_gpio6_groups[] = { + "gpio0", "gpio35", +}; +static const char * const qdss_gpio7_groups[] = { + "gpio1", "gpio36", +}; +static const char * const qdss_gpio8_groups[] = { + "gpio2", "gpio42", +}; +static const char * const qdss_gpio9_groups[] = { + "gpio3", "gpio83", +}; +static const char * const qup01_groups[] = { + "gpio4", "gpio5", +}; +static const char * const qup02_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", +}; +static const char * const ddr_pxi0_groups[] = { + "gpio6", "gpio7", +}; +static const char * const ddr_bist_groups[] = { + "gpio7", 
"gpio8", "gpio9", "gpio10", +}; +static const char * const atest_tsens2_groups[] = { + "gpio7", +}; +static const char * const vsense_trigger_groups[] = { + "gpio7", +}; +static const char * const atest_usb1_groups[] = { + "gpio7", +}; +static const char * const GP_PDM1_groups[] = { + "gpio8", "gpio65", +}; +static const char * const phase_flag23_groups[] = { + "gpio8", +}; +static const char * const dbg_out_groups[] = { + "gpio9", +}; +static const char * const phase_flag28_groups[] = { + "gpio9", +}; +static const char * const qup14_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio13", +}; +static const char * const atest_usb11_groups[] = { + "gpio10", +}; +static const char * const ddr_pxi2_groups[] = { + "gpio10", "gpio11", +}; +static const char * const atest_usb10_groups[] = { + "gpio11", +}; +static const char * const JITTER_BIST_groups[] = { + "gpio12", "gpio31", +}; +static const char * const ddr_pxi3_groups[] = { + "gpio12", "gpio13", +}; +static const char * const pll_bypassnl_groups[] = { + "gpio13", +}; +static const char * const PLL_BIST_groups[] = { + "gpio13", "gpio32", +}; +static const char * const qup03_groups[] = { + "gpio14", "gpio15", +}; +static const char * const pll_reset_groups[] = { + "gpio14", +}; +static const char * const AGERA_PLL_groups[] = { + "gpio14", "gpio33", +}; +static const char * const qdss_cti_groups[] = { + "gpio14", "gpio15", "gpio95", "gpio101", "gpio106", "gpio107", + "gpio110", "gpio111", +}; +static const char * const qup04_groups[] = { + "gpio16", "gpio17", +}; +static const char * const wlan2_adc1_groups[] = { + "gpio16", +}; +static const char * const wlan2_adc0_groups[] = { + "gpio17", +}; +static const char * const WSA_CLK_groups[] = { + "gpio18", +}; +static const char * const qup13_groups[] = { + "gpio18", "gpio19", "gpio20", "gpio21", +}; +static const char * const ter_mi2s_groups[] = { + "gpio18", "gpio19", "gpio20", "gpio21", +}; +static const char * const WSA_DATA_groups[] = { + "gpio19", +}; +static const 
char * const qdss_gpio4_groups[] = { + "gpio20", "gpio49", +}; +static const char * const qdss_gpio5_groups[] = { + "gpio21", "gpio34", +}; +static const char * const qup10_groups[] = { + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", +}; +static const char * const gcc_gp3_groups[] = { + "gpio22", "gpio58", +}; +static const char * const phase_flag0_groups[] = { + "gpio23", +}; +static const char * const phase_flag3_groups[] = { + "gpio24", +}; +static const char * const phase_flag2_groups[] = { + "gpio25", +}; +static const char * const phase_flag1_groups[] = { + "gpio26", +}; +static const char * const qup12_groups[] = { + "gpio28", "gpio29", +}; +static const char * const phase_flag15_groups[] = { + "gpio28", +}; +static const char * const sd_write_groups[] = { + "gpio29", +}; +static const char * const phase_flag29_groups[] = { + "gpio29", +}; +static const char * const qup11_groups[] = { + "gpio30", "gpio31", "gpio32", "gpio33", +}; +static const char * const phase_flag10_groups[] = { + "gpio30", +}; +static const char * const cam_mclk_groups[] = { + "gpio34", "gpio35", "gpio36", +}; +static const char * const atest_tsens_groups[] = { + "gpio34", +}; +static const char * const cci_i2c_groups[] = { + "gpio37", "gpio38", "gpio39", "gpio40", +}; +static const char * const qdss_gpio1_groups[] = { + "gpio41", "gpio116", +}; +static const char * const cci_timer2_groups[] = { + "gpio42", +}; +static const char * const cci_timer1_groups[] = { + "gpio43", +}; +static const char * const gcc_gp2_groups[] = { + "gpio43", "gpio44", +}; +static const char * const qdss_gpio2_groups[] = { + "gpio43", "gpio117", +}; +static const char * const cci_async_groups[] = { + "gpio44", "gpio47", "gpio48", +}; +static const char * const cci_timer4_groups[] = { + "gpio44", +}; +static const char * const qdss_gpio12_groups[] = { + "gpio44", "gpio100", +}; +static const char * const cci_timer0_groups[] = { + "gpio45", +}; +static const char * const gcc_gp1_groups[] = { + 
"gpio45", "gpio46", +}; +static const char * const qdss_gpio13_groups[] = { + "gpio45", "gpio94", +}; +static const char * const cci_timer3_groups[] = { + "gpio46", +}; +static const char * const qdss_gpio14_groups[] = { + "gpio46", "gpio86", +}; +static const char * const qdss_gpio15_groups[] = { + "gpio47", "gpio96", +}; +static const char * const wlan1_adc1_groups[] = { + "gpio47", +}; +static const char * const qdss_gpio3_groups[] = { + "gpio48", "gpio118", +}; +static const char * const wlan1_adc0_groups[] = { + "gpio48", +}; +static const char * const qlink_request_groups[] = { + "gpio50", +}; +static const char * const qlink_enable_groups[] = { + "gpio51", +}; +static const char * const pa_indicator_groups[] = { + "gpio52", +}; +static const char * const NAV_PPS_groups[] = { + "gpio52", "gpio55", "gpio56", "gpio58", + "gpio59", +}; +static const char * const GPS_TX_groups[] = { + "gpio52", "gpio53", "gpio55", "gpio56", "gpio58", "gpio59", +}; +static const char * const GP_PDM0_groups[] = { + "gpio53", "gpio94", +}; +static const char * const phase_flag22_groups[] = { + "gpio53", +}; +static const char * const atest_usb13_groups[] = { + "gpio53", +}; +static const char * const ddr_pxi1_groups[] = { + "gpio53", "gpio54", +}; +static const char * const phase_flag4_groups[] = { + "gpio54", +}; +static const char * const atest_usb12_groups[] = { + "gpio54", +}; +static const char * const phase_flag9_groups[] = { + "gpio55", +}; +static const char * const phase_flag8_groups[] = { + "gpio56", +}; +static const char * const phase_flag7_groups[] = { + "gpio57", +}; +static const char * const phase_flag27_groups[] = { + "gpio58", +}; +static const char * const CRI_TRNG0_groups[] = { + "gpio59", +}; +static const char * const phase_flag26_groups[] = { + "gpio59", +}; +static const char * const CRI_TRNG_groups[] = { + "gpio60", +}; +static const char * const phase_flag25_groups[] = { + "gpio60", +}; +static const char * const CRI_TRNG1_groups[] = { + "gpio61", +}; 
+static const char * const phase_flag6_groups[] = { + "gpio61", +}; +static const char * const GP_PDM2_groups[] = { + "gpio62", "gpio78", +}; +static const char * const phase_flag5_groups[] = { + "gpio62", +}; +static const char * const SP_CMU_groups[] = { + "gpio63", +}; +static const char * const atest_usb2_groups[] = { + "gpio66", +}; +static const char * const atest_usb23_groups[] = { + "gpio67", +}; +static const char * const uim2_data_groups[] = { + "gpio72", +}; +static const char * const uim2_clk_groups[] = { + "gpio73", +}; +static const char * const uim2_reset_groups[] = { + "gpio74", +}; +static const char * const atest_usb22_groups[] = { + "gpio74", +}; +static const char * const uim2_present_groups[] = { + "gpio75", +}; +static const char * const atest_usb21_groups[] = { + "gpio75", +}; +static const char * const uim1_data_groups[] = { + "gpio76", +}; +static const char * const atest_usb20_groups[] = { + "gpio76", +}; +static const char * const uim1_clk_groups[] = { + "gpio77", +}; +static const char * const uim1_reset_groups[] = { + "gpio78", +}; +static const char * const uim1_present_groups[] = { + "gpio79", +}; +static const char * const mdp_vsync_groups[] = { + "gpio80", "gpio81", "gpio82", "gpio89", "gpio96", "gpio97", +}; +static const char * const phase_flag17_groups[] = { + "gpio80", +}; +static const char * const qdss_gpio0_groups[] = { + "gpio80", "gpio115", +}; +static const char * const phase_flag13_groups[] = { + "gpio81", +}; +static const char * const qdss_gpio_groups[] = { + "gpio81", "gpio82", "gpio102", "gpio114", +}; +static const char * const phase_flag16_groups[] = { + "gpio82", +}; +static const char * const phase_flag12_groups[] = { + "gpio83", +}; +static const char * const phase_flag18_groups[] = { + "gpio84", +}; +static const char * const qdss_gpio10_groups[] = { + "gpio84", "gpio91", +}; +static const char * const copy_gp_groups[] = { + "gpio85", +}; +static const char * const qdss_gpio11_groups[] = { + "gpio85", "gpio92", 
+}; +static const char * const tsense_pwm_groups[] = { + "gpio87", +}; +static const char * const mpm_pwr_groups[] = { + "gpio88", +}; +static const char * const tgu_ch3_groups[] = { + "gpio88", +}; +static const char * const phase_flag31_groups[] = { + "gpio88", +}; +static const char * const mdp_vsync0_groups[] = { + "gpio89", +}; +static const char * const mdp_vsync1_groups[] = { + "gpio89", +}; +static const char * const mdp_vsync2_groups[] = { + "gpio89", +}; +static const char * const mdp_vsync3_groups[] = { + "gpio89", +}; +static const char * const mdp_vsync4_groups[] = { + "gpio89", +}; +static const char * const mdp_vsync5_groups[] = { + "gpio89", +}; +static const char * const tgu_ch0_groups[] = { + "gpio89", +}; +static const char * const phase_flag11_groups[] = { + "gpio89", +}; +static const char * const tgu_ch1_groups[] = { + "gpio90", +}; +static const char * const atest_char1_groups[] = { + "gpio90", +}; +static const char * const vfr_1_groups[] = { + "gpio91", +}; +static const char * const tgu_ch2_groups[] = { + "gpio91", +}; +static const char * const phase_flag30_groups[] = { + "gpio91", +}; +static const char * const atest_char0_groups[] = { + "gpio92", +}; +static const char * const phase_flag24_groups[] = { + "gpio93", +}; +static const char * const atest_char2_groups[] = { + "gpio93", +}; +static const char * const atest_char3_groups[] = { + "gpio94", +}; +static const char * const ldo_en_groups[] = { + "gpio96", +}; +static const char * const ldo_update_groups[] = { + "gpio97", +}; +static const char * const phase_flag19_groups[] = { + "gpio98", +}; +static const char * const prng_rosc_groups[] = { + "gpio98", "gpio100", +}; +static const char * const dp_hot_groups[] = { + "gpio100", +}; +static const char * const debug_hot_groups[] = { + "gpio101", +}; +static const char * const COPY_PHASE_groups[] = { + "gpio101", +}; +static const char * const usb_phy_groups[] = { + "gpio102", +}; +static const char * const atest_char_groups[] = { + 
"gpio102", +}; +static const char * const mss_lte_groups[] = { + "gpio104", "gpio105", +}; +static const char * const swr_tx_groups[] = { + "gpio106", "gpio107", "gpio108", "gpio109", +}; +static const char * const aud_sb_groups[] = { + "gpio106", "gpio107", "gpio108", "gpio109", +}; +static const char * const qua_mi2s_groups[] = { + "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111", +}; +static const char * const swr_rx_groups[] = { + "gpio110", "gpio111", "gpio112", +}; +static const char * const edp_hot_groups[] = { + "gpio111", +}; +static const char * const audio_ref_groups[] = { + "gpio112", +}; +static const char * const pri_mi2s_groups[] = { + "gpio113", "gpio115", "gpio116", +}; +static const char * const pri_mi2s_ws_groups[] = { + "gpio114", +}; +static const char * const adsp_ext_groups[] = { + "gpio116", +}; +static const char * const edp_lcd_groups[] = { + "gpio117", +}; +static const char * const mclk2_groups[] = { + "gpio118", +}; +static const char * const m_voc_groups[] = { + "gpio118", +}; +static const char * const mclk1_groups[] = { + "gpio119", +}; +static const char * const qca_sb_groups[] = { + "gpio121", "gpio122", +}; +static const char * const qui_mi2s_groups[] = { + "gpio121", "gpio122", "gpio123", "gpio124", +}; +static const char * const DMIC0_CLK_groups[] = { + "gpio125", +}; +static const char * const sec_mi2s_groups[] = { + "gpio125", "gpio126", "gpio127", "gpio128", +}; +static const char * const DMIC0_DATA_groups[] = { + "gpio126", +}; +static const char * const DMIC1_CLK_groups[] = { + "gpio127", +}; +static const char * const DMIC1_DATA_groups[] = { + "gpio128", +}; +static const char * const phase_flag14_groups[] = { + "gpio129", +}; +static const char * const phase_flag21_groups[] = { + "gpio130", +}; +static const char * const phase_flag20_groups[] = { + "gpio131", +}; + +static const struct msm_function trinket_functions[] = { + FUNCTION(qup00), + FUNCTION(gpio), + FUNCTION(qdss_gpio6), + FUNCTION(qdss_gpio7), 
+ FUNCTION(qdss_gpio8), + FUNCTION(qdss_gpio9), + FUNCTION(qup01), + FUNCTION(qup02), + FUNCTION(ddr_pxi0), + FUNCTION(ddr_bist), + FUNCTION(atest_tsens2), + FUNCTION(vsense_trigger), + FUNCTION(atest_usb1), + FUNCTION(GP_PDM1), + FUNCTION(phase_flag23), + FUNCTION(dbg_out), + FUNCTION(phase_flag28), + FUNCTION(qup14), + FUNCTION(atest_usb11), + FUNCTION(ddr_pxi2), + FUNCTION(atest_usb10), + FUNCTION(JITTER_BIST), + FUNCTION(ddr_pxi3), + FUNCTION(pll_bypassnl), + FUNCTION(PLL_BIST), + FUNCTION(qup03), + FUNCTION(pll_reset), + FUNCTION(AGERA_PLL), + FUNCTION(qdss_cti), + FUNCTION(qup04), + FUNCTION(wlan2_adc1), + FUNCTION(wlan2_adc0), + FUNCTION(WSA_CLK), + FUNCTION(qup13), + FUNCTION(ter_mi2s), + FUNCTION(WSA_DATA), + FUNCTION(qdss_gpio4), + FUNCTION(qdss_gpio5), + FUNCTION(qup10), + FUNCTION(gcc_gp3), + FUNCTION(phase_flag0), + FUNCTION(phase_flag3), + FUNCTION(phase_flag2), + FUNCTION(phase_flag1), + FUNCTION(qup12), + FUNCTION(phase_flag15), + FUNCTION(sd_write), + FUNCTION(phase_flag29), + FUNCTION(qup11), + FUNCTION(phase_flag10), + FUNCTION(cam_mclk), + FUNCTION(atest_tsens), + FUNCTION(cci_i2c), + FUNCTION(qdss_gpio1), + FUNCTION(cci_timer2), + FUNCTION(cci_timer1), + FUNCTION(gcc_gp2), + FUNCTION(qdss_gpio2), + FUNCTION(cci_async), + FUNCTION(cci_timer4), + FUNCTION(qdss_gpio12), + FUNCTION(cci_timer0), + FUNCTION(gcc_gp1), + FUNCTION(qdss_gpio13), + FUNCTION(cci_timer3), + FUNCTION(qdss_gpio14), + FUNCTION(qdss_gpio15), + FUNCTION(wlan1_adc1), + FUNCTION(qdss_gpio3), + FUNCTION(wlan1_adc0), + FUNCTION(qlink_request), + FUNCTION(qlink_enable), + FUNCTION(pa_indicator), + FUNCTION(NAV_PPS), + FUNCTION(GPS_TX), + FUNCTION(GP_PDM0), + FUNCTION(phase_flag22), + FUNCTION(atest_usb13), + FUNCTION(ddr_pxi1), + FUNCTION(phase_flag4), + FUNCTION(atest_usb12), + FUNCTION(phase_flag9), + FUNCTION(phase_flag8), + FUNCTION(phase_flag7), + FUNCTION(phase_flag27), + FUNCTION(CRI_TRNG0), + FUNCTION(phase_flag26), + FUNCTION(CRI_TRNG), + FUNCTION(phase_flag25), + 
FUNCTION(CRI_TRNG1), + FUNCTION(phase_flag6), + FUNCTION(GP_PDM2), + FUNCTION(phase_flag5), + FUNCTION(SP_CMU), + FUNCTION(atest_usb2), + FUNCTION(atest_usb23), + FUNCTION(uim2_data), + FUNCTION(uim2_clk), + FUNCTION(uim2_reset), + FUNCTION(atest_usb22), + FUNCTION(uim2_present), + FUNCTION(atest_usb21), + FUNCTION(uim1_data), + FUNCTION(atest_usb20), + FUNCTION(uim1_clk), + FUNCTION(uim1_reset), + FUNCTION(uim1_present), + FUNCTION(mdp_vsync), + FUNCTION(phase_flag17), + FUNCTION(qdss_gpio0), + FUNCTION(phase_flag13), + FUNCTION(qdss_gpio), + FUNCTION(phase_flag16), + FUNCTION(phase_flag12), + FUNCTION(phase_flag18), + FUNCTION(qdss_gpio10), + FUNCTION(copy_gp), + FUNCTION(qdss_gpio11), + FUNCTION(tsense_pwm), + FUNCTION(mpm_pwr), + FUNCTION(tgu_ch3), + FUNCTION(phase_flag31), + FUNCTION(mdp_vsync0), + FUNCTION(mdp_vsync1), + FUNCTION(mdp_vsync2), + FUNCTION(mdp_vsync3), + FUNCTION(mdp_vsync4), + FUNCTION(mdp_vsync5), + FUNCTION(tgu_ch0), + FUNCTION(phase_flag11), + FUNCTION(tgu_ch1), + FUNCTION(atest_char1), + FUNCTION(vfr_1), + FUNCTION(tgu_ch2), + FUNCTION(phase_flag30), + FUNCTION(atest_char0), + FUNCTION(phase_flag24), + FUNCTION(atest_char2), + FUNCTION(atest_char3), + FUNCTION(ldo_en), + FUNCTION(ldo_update), + FUNCTION(phase_flag19), + FUNCTION(prng_rosc), + FUNCTION(dp_hot), + FUNCTION(debug_hot), + FUNCTION(COPY_PHASE), + FUNCTION(usb_phy), + FUNCTION(atest_char), + FUNCTION(mss_lte), + FUNCTION(swr_tx), + FUNCTION(aud_sb), + FUNCTION(qua_mi2s), + FUNCTION(swr_rx), + FUNCTION(edp_hot), + FUNCTION(audio_ref), + FUNCTION(pri_mi2s), + FUNCTION(pri_mi2s_ws), + FUNCTION(adsp_ext), + FUNCTION(edp_lcd), + FUNCTION(mclk2), + FUNCTION(m_voc), + FUNCTION(mclk1), + FUNCTION(qca_sb), + FUNCTION(qui_mi2s), + FUNCTION(DMIC0_CLK), + FUNCTION(sec_mi2s), + FUNCTION(DMIC0_DATA), + FUNCTION(DMIC1_CLK), + FUNCTION(DMIC1_DATA), + FUNCTION(phase_flag14), + FUNCTION(phase_flag21), + FUNCTION(phase_flag20), +}; + +/* Every pin is maintained as a single group, and missing or 
non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. + */ +static const struct msm_pingroup trinket_groups[] = { + [0] = PINGROUP(0, WEST, qup00, NA, qdss_gpio6, NA, NA, NA, NA, NA, NA), + [1] = PINGROUP(1, WEST, qup00, NA, qdss_gpio7, NA, NA, NA, NA, NA, NA), + [2] = PINGROUP(2, WEST, qup00, NA, qdss_gpio8, NA, NA, NA, NA, NA, NA), + [3] = PINGROUP(3, WEST, qup00, NA, qdss_gpio9, NA, NA, NA, NA, NA, NA), + [4] = PINGROUP(4, WEST, qup01, NA, NA, NA, NA, NA, NA, NA, NA), + [5] = PINGROUP(5, WEST, qup01, NA, NA, NA, NA, NA, NA, NA, NA), + [6] = PINGROUP(6, WEST, qup02, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA), + [7] = PINGROUP(7, WEST, qup02, ddr_bist, atest_tsens2, vsense_trigger, + atest_usb1, ddr_pxi0, NA, NA, NA), + [8] = PINGROUP(8, WEST, qup02, GP_PDM1, ddr_bist, NA, phase_flag23, NA, + NA, NA, NA), + [9] = PINGROUP(9, WEST, qup02, ddr_bist, dbg_out, phase_flag28, NA, NA, + NA, NA, NA), + [10] = PINGROUP(10, EAST, qup14, ddr_bist, atest_usb11, ddr_pxi2, NA, + NA, NA, NA, NA), + [11] = PINGROUP(11, EAST, qup14, atest_usb10, ddr_pxi2, NA, NA, NA, NA, + NA, NA), + [12] = PINGROUP(12, EAST, qup14, JITTER_BIST, ddr_pxi3, NA, NA, NA, NA, + NA, NA), + [13] = PINGROUP(13, EAST, qup14, pll_bypassnl, PLL_BIST, NA, ddr_pxi3, + NA, NA, NA, NA), + [14] = PINGROUP(14, WEST, qup03, qup03, pll_reset, AGERA_PLL, NA, + qdss_cti, NA, NA, NA), + [15] = PINGROUP(15, WEST, qup03, qup03, qdss_cti, NA, NA, NA, NA, NA, + NA), + [16] = PINGROUP(16, WEST, qup04, qup04, NA, wlan2_adc1, NA, NA, NA, NA, + NA), + [17] = PINGROUP(17, WEST, qup04, qup04, NA, wlan2_adc0, NA, NA, NA, NA, + NA), + [18] = PINGROUP(18, EAST, WSA_CLK, qup13, ter_mi2s, NA, NA, NA, NA, NA, + NA), + [19] = PINGROUP(19, EAST, WSA_DATA, qup13, ter_mi2s, NA, NA, NA, NA, + NA, NA), + [20] = PINGROUP(20, EAST, qup13, ter_mi2s, qdss_gpio4, NA, NA, NA, NA, + NA, NA), + [21] = 
PINGROUP(21, EAST, qup13, ter_mi2s, NA, qdss_gpio5, NA, NA, NA, + NA, NA), + [22] = PINGROUP(22, WEST, qup10, gcc_gp3, NA, NA, NA, NA, NA, NA, NA), + [23] = PINGROUP(23, WEST, qup10, NA, phase_flag0, NA, NA, NA, NA, NA, + NA), + [24] = PINGROUP(24, WEST, qup10, NA, phase_flag3, NA, NA, NA, NA, NA, + NA), + [25] = PINGROUP(25, WEST, qup10, NA, phase_flag2, NA, NA, NA, NA, NA, + NA), + [26] = PINGROUP(26, WEST, qup10, NA, phase_flag1, NA, NA, NA, NA, NA, + NA), + [27] = PINGROUP(27, WEST, qup10, NA, NA, NA, NA, NA, NA, NA, NA), + [28] = PINGROUP(28, WEST, qup12, NA, phase_flag15, NA, NA, NA, NA, NA, + NA), + [29] = PINGROUP(29, WEST, qup12, sd_write, NA, phase_flag29, NA, NA, + NA, NA, NA), + [30] = PINGROUP(30, WEST, qup11, NA, phase_flag10, NA, NA, NA, NA, NA, + NA), + [31] = PINGROUP(31, WEST, qup11, JITTER_BIST, NA, NA, NA, NA, NA, NA, + NA), + [32] = PINGROUP(32, WEST, qup11, PLL_BIST, NA, NA, NA, NA, NA, NA, NA), + [33] = PINGROUP(33, WEST, qup11, AGERA_PLL, NA, NA, NA, NA, NA, NA, NA), + [34] = PINGROUP(34, SOUTH, cam_mclk, NA, qdss_gpio5, atest_tsens, NA, + NA, NA, NA, NA), + [35] = PINGROUP(35, SOUTH, cam_mclk, NA, qdss_gpio6, NA, NA, NA, NA, + NA, NA), + [36] = PINGROUP(36, SOUTH, cam_mclk, NA, qdss_gpio7, NA, NA, NA, NA, + NA, NA), + [37] = PINGROUP(37, SOUTH, cci_i2c, NA, NA, NA, NA, NA, NA, NA, NA), + [38] = PINGROUP(38, EAST, cci_i2c, NA, NA, NA, NA, NA, NA, NA, NA), + [39] = PINGROUP(39, EAST, cci_i2c, NA, NA, NA, NA, NA, NA, NA, NA), + [40] = PINGROUP(40, EAST, cci_i2c, NA, NA, NA, NA, NA, NA, NA, NA), + [41] = PINGROUP(41, EAST, NA, qdss_gpio1, NA, NA, NA, NA, NA, NA, NA), + [42] = PINGROUP(42, EAST, cci_timer2, NA, qdss_gpio8, NA, NA, NA, NA, + NA, NA), + [43] = PINGROUP(43, EAST, cci_timer1, NA, gcc_gp2, NA, qdss_gpio2, NA, + NA, NA, NA), + [44] = PINGROUP(44, SOUTH, cci_async, cci_timer4, NA, gcc_gp2, NA, + qdss_gpio12, NA, NA, NA), + [45] = PINGROUP(45, SOUTH, cci_timer0, NA, gcc_gp1, qdss_gpio13, NA, + NA, NA, NA, NA), + [46] = PINGROUP(46, 
SOUTH, cci_timer3, NA, gcc_gp1, NA, qdss_gpio14, + NA, NA, NA, NA), + [47] = PINGROUP(47, SOUTH, cci_async, NA, qdss_gpio15, wlan1_adc1, NA, + NA, NA, NA, NA), + [48] = PINGROUP(48, SOUTH, cci_async, NA, qdss_gpio3, wlan1_adc0, NA, + NA, NA, NA, NA), + [49] = PINGROUP(49, SOUTH, qdss_gpio4, NA, NA, NA, NA, NA, NA, NA, NA), + [50] = PINGROUP(50, SOUTH, qlink_request, NA, NA, NA, NA, NA, NA, NA, + NA), + [51] = PINGROUP(51, SOUTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA, + NA), + [52] = PINGROUP(52, SOUTH, pa_indicator, NAV_PPS, NAV_PPS, GPS_TX, NA, + NA, NA, NA, NA), + [53] = PINGROUP(53, SOUTH, NA, GPS_TX, GP_PDM0, NA, phase_flag22, + atest_usb13, ddr_pxi1, NA, NA), + [54] = PINGROUP(54, SOUTH, NA, NA, phase_flag4, atest_usb12, ddr_pxi1, + NA, NA, NA, NA), + [55] = PINGROUP(55, SOUTH, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, + phase_flag9, NA, NA, NA), + [56] = PINGROUP(56, SOUTH, NA, NAV_PPS, GPS_TX, NAV_PPS, phase_flag8, + NA, NA, NA, NA), + [57] = PINGROUP(57, SOUTH, NA, phase_flag7, NA, NA, NA, NA, NA, NA, NA), + [58] = PINGROUP(58, SOUTH, NA, NAV_PPS, NAV_PPS, GPS_TX, gcc_gp3, NA, + phase_flag27, NA, NA), + [59] = PINGROUP(59, SOUTH, NA, NAV_PPS, NAV_PPS, GPS_TX, CRI_TRNG0, NA, + phase_flag26, NA, NA), + [60] = PINGROUP(60, SOUTH, NA, CRI_TRNG, NA, phase_flag25, NA, NA, NA, + NA, NA), + [61] = PINGROUP(61, SOUTH, NA, CRI_TRNG1, NA, phase_flag6, NA, NA, NA, + NA, NA), + [62] = PINGROUP(62, SOUTH, NA, NA, GP_PDM2, NA, phase_flag5, NA, NA, + NA, NA), + [63] = PINGROUP(63, SOUTH, NA, SP_CMU, NA, NA, NA, NA, NA, NA, NA), + [64] = PINGROUP(64, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [65] = PINGROUP(65, SOUTH, NA, GP_PDM1, NA, NA, NA, NA, NA, NA, NA), + [66] = PINGROUP(66, SOUTH, NA, NA, atest_usb2, NA, NA, NA, NA, NA, NA), + [67] = PINGROUP(67, SOUTH, NA, NA, atest_usb23, NA, NA, NA, NA, NA, NA), + [68] = PINGROUP(68, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [69] = PINGROUP(69, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [70] = PINGROUP(70, SOUTH, NA, NA, NA, 
NA, NA, NA, NA, NA, NA), + [71] = PINGROUP(71, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [72] = PINGROUP(72, SOUTH, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA), + [73] = PINGROUP(73, SOUTH, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA), + [74] = PINGROUP(74, SOUTH, uim2_reset, NA, atest_usb22, NA, NA, NA, NA, + NA, NA), + [75] = PINGROUP(75, SOUTH, uim2_present, NA, atest_usb21, NA, NA, NA, + NA, NA, NA), + [76] = PINGROUP(76, SOUTH, uim1_data, NA, atest_usb20, NA, NA, NA, NA, + NA, NA), + [77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA), + [78] = PINGROUP(78, SOUTH, uim1_reset, GP_PDM2, NA, NA, NA, NA, NA, NA, + NA), + [79] = PINGROUP(79, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA, + NA), + [80] = PINGROUP(80, SOUTH, mdp_vsync, NA, phase_flag17, qdss_gpio0, NA, + NA, NA, NA, NA), + [81] = PINGROUP(81, SOUTH, mdp_vsync, NA, phase_flag13, qdss_gpio, NA, + NA, NA, NA, NA), + [82] = PINGROUP(82, SOUTH, mdp_vsync, NA, phase_flag16, qdss_gpio, NA, + NA, NA, NA, NA), + [83] = PINGROUP(83, SOUTH, NA, phase_flag12, qdss_gpio9, NA, NA, NA, + NA, NA, NA), + [84] = PINGROUP(84, SOUTH, NA, phase_flag18, qdss_gpio10, NA, NA, NA, + NA, NA, NA), + [85] = PINGROUP(85, SOUTH, copy_gp, NA, qdss_gpio11, NA, NA, NA, NA, + NA, NA), + [86] = PINGROUP(86, SOUTH, NA, qdss_gpio14, NA, NA, NA, NA, NA, NA, NA), + [87] = PINGROUP(87, WEST, tsense_pwm, NA, NA, NA, NA, NA, NA, NA, NA), + [88] = PINGROUP(88, WEST, mpm_pwr, tgu_ch3, NA, phase_flag31, NA, NA, + NA, NA, NA), + [89] = PINGROUP(89, WEST, mdp_vsync, mdp_vsync0, mdp_vsync1, + mdp_vsync2, mdp_vsync3, mdp_vsync4, mdp_vsync5, + tgu_ch0, NA), + [90] = PINGROUP(90, WEST, tgu_ch1, atest_char1, NA, NA, NA, NA, NA, NA, + NA), + [91] = PINGROUP(91, WEST, vfr_1, tgu_ch2, NA, phase_flag30, + qdss_gpio10, NA, NA, NA, NA), + [92] = PINGROUP(92, WEST, qdss_gpio11, atest_char0, NA, NA, NA, NA, NA, + NA, NA), + [93] = PINGROUP(93, WEST, NA, phase_flag24, atest_char2, NA, NA, NA, + NA, NA, NA), + [94] = PINGROUP(94, SOUTH, 
GP_PDM0, NA, qdss_gpio13, atest_char3, NA, + NA, NA, NA, NA), + [95] = PINGROUP(95, SOUTH, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA), + [96] = PINGROUP(96, SOUTH, mdp_vsync, ldo_en, qdss_gpio15, NA, NA, NA, + NA, NA, NA), + [97] = PINGROUP(97, SOUTH, mdp_vsync, ldo_update, NA, NA, NA, NA, NA, + NA, NA), + [98] = PINGROUP(98, SOUTH, NA, phase_flag19, prng_rosc, NA, NA, NA, NA, + NA, NA), + [99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [100] = PINGROUP(100, SOUTH, dp_hot, prng_rosc, qdss_gpio12, NA, NA, + NA, NA, NA, NA), + [101] = PINGROUP(101, SOUTH, debug_hot, COPY_PHASE, qdss_cti, NA, NA, + NA, NA, NA, NA), + [102] = PINGROUP(102, SOUTH, usb_phy, NA, qdss_gpio, atest_char, NA, + NA, NA, NA, NA), + [103] = PINGROUP(103, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [104] = PINGROUP(104, EAST, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA), + [105] = PINGROUP(105, EAST, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA), + [106] = PINGROUP(106, EAST, swr_tx, aud_sb, qua_mi2s, NA, qdss_cti, NA, + NA, NA, NA), + [107] = PINGROUP(107, EAST, swr_tx, aud_sb, qua_mi2s, NA, qdss_cti, NA, + NA, NA, NA), + [108] = PINGROUP(108, EAST, swr_tx, aud_sb, qua_mi2s, NA, NA, NA, NA, + NA, NA), + [109] = PINGROUP(109, EAST, swr_tx, aud_sb, qua_mi2s, NA, NA, NA, NA, + NA, NA), + [110] = PINGROUP(110, EAST, swr_rx, qua_mi2s, NA, qdss_cti, NA, NA, NA, + NA, NA), + [111] = PINGROUP(111, EAST, swr_rx, qua_mi2s, edp_hot, NA, qdss_cti, + NA, NA, NA, NA), + [112] = PINGROUP(112, EAST, swr_rx, audio_ref, NA, NA, NA, NA, NA, NA, + NA), + [113] = PINGROUP(113, EAST, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA), + [114] = PINGROUP(114, EAST, pri_mi2s_ws, qdss_gpio, NA, NA, NA, NA, NA, + NA, NA), + [115] = PINGROUP(115, EAST, pri_mi2s, qdss_gpio0, NA, NA, NA, NA, NA, + NA, NA), + [116] = PINGROUP(116, EAST, pri_mi2s, adsp_ext, qdss_gpio1, NA, NA, NA, + NA, NA, NA), + [117] = PINGROUP(117, SOUTH, edp_lcd, qdss_gpio2, NA, NA, NA, NA, NA, + NA, NA), + [118] = PINGROUP(118, SOUTH, mclk2, m_voc, 
qdss_gpio3, NA, NA, NA, NA, + NA, NA), + [119] = PINGROUP(119, SOUTH, mclk1, NA, NA, NA, NA, NA, NA, NA, NA), + [120] = PINGROUP(120, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [121] = PINGROUP(121, EAST, qca_sb, qui_mi2s, NA, NA, NA, NA, NA, NA, + NA), + [122] = PINGROUP(122, EAST, qca_sb, qui_mi2s, NA, NA, NA, NA, NA, NA, + NA), + [123] = PINGROUP(123, EAST, qui_mi2s, NA, NA, NA, NA, NA, NA, NA, NA), + [124] = PINGROUP(124, EAST, qui_mi2s, NA, NA, NA, NA, NA, NA, NA, NA), + [125] = PINGROUP(125, EAST, DMIC0_CLK, sec_mi2s, NA, NA, NA, NA, NA, + NA, NA), + [126] = PINGROUP(126, EAST, DMIC0_DATA, sec_mi2s, NA, NA, NA, NA, NA, + NA, NA), + [127] = PINGROUP(127, EAST, DMIC1_CLK, sec_mi2s, NA, NA, NA, NA, NA, + NA, NA), + [128] = PINGROUP(128, EAST, DMIC1_DATA, sec_mi2s, NA, NA, NA, NA, NA, + NA, NA), + [129] = PINGROUP(129, SOUTH, NA, phase_flag14, NA, NA, NA, NA, NA, NA, + NA), + [130] = PINGROUP(130, SOUTH, phase_flag21, NA, NA, NA, NA, NA, NA, NA, + NA), + [131] = PINGROUP(131, SOUTH, phase_flag20, NA, NA, NA, NA, NA, NA, NA, + NA), + [132] = PINGROUP(132, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [133] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x18d000, 15, 0), + [134] = SDC_QDSD_PINGROUP(sdc1_clk, 0x18d000, 13, 6), + [135] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x18d000, 11, 3), + [136] = SDC_QDSD_PINGROUP(sdc1_data, 0x18d000, 9, 0), + [137] = SDC_QDSD_PINGROUP(sdc2_clk, 0x58b000, 14, 6), + [138] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x58b000, 11, 3), + [139] = SDC_QDSD_PINGROUP(sdc2_data, 0x58b000, 9, 0), + [140] = UFS_RESET(ufs_reset, 0x190000), +}; + +static const struct msm_pinctrl_soc_data trinket_pinctrl = { + .pins = trinket_pins, + .npins = ARRAY_SIZE(trinket_pins), + .functions = trinket_functions, + .nfunctions = ARRAY_SIZE(trinket_functions), + .groups = trinket_groups, + .ngroups = ARRAY_SIZE(trinket_groups), + .ngpios = 133, +}; + +static int trinket_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &trinket_pinctrl); +} + +static const 
struct of_device_id trinket_pinctrl_of_match[] = { + { .compatible = "qcom,trinket-pinctrl", }, + { }, +}; + +static struct platform_driver trinket_pinctrl_driver = { + .driver = { + .name = "trinket-pinctrl", + .owner = THIS_MODULE, + .of_match_table = trinket_pinctrl_of_match, + }, + .probe = trinket_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init trinket_pinctrl_init(void) +{ + return platform_driver_register(&trinket_pinctrl_driver); +} +arch_initcall(trinket_pinctrl_init); + +static void __exit trinket_pinctrl_exit(void) +{ + platform_driver_unregister(&trinket_pinctrl_driver); +} +module_exit(trinket_pinctrl_exit); + +MODULE_DESCRIPTION("QTI trinket pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, trinket_pinctrl_of_match); diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 32c1977c87aeaf513eae2f2df873a26ff517127d..5fd32155da4808ac1785ca9c54f0d59fcdc5e0a4 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -104,6 +104,16 @@ config GSI_REGISTER_VERSION_2 new registers offsets, new registers fields structure and new registers. +config MSM_MHI_DEV + tristate "Modem Device Interface Driver" + depends on EP_PCIE && IPA3 + help + This kernel module is used to interact with PCIe Root complex + supporting MHI protocol. MHI is a data transmission protocol + involving communication between a host and a device over shared + memory. MHI interacts with the IPA for supporting transfers + on the HW accelerated channels between Host and device. 
+ config IPA3 tristate "IPA3 support" select GSI diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index 29735cc6b4f878e7040695bc1a3f0fdc0ce864f6..a626166094222769595c3aa1b5523ad789349146 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -10,5 +10,6 @@ obj-$(CONFIG_GSI) += gsi/ obj-$(CONFIG_IPA) += ipa/ obj-$(CONFIG_IPA3) += ipa/ obj-$(CONFIG_EP_PCIE) += ep_pcie/ +obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/ obj-$(CONFIG_MSM_11AD) += msm_11ad/ obj-$(CONFIG_SEEMP_CORE) += seemp_core/ diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index 2ddaea9fb802552e9afe506c6deabad34e832e97..7cc9274d7d5fa3e5741ccfef5a7a76b8fc3993d0 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -3239,6 +3239,33 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) mode == GSI_CHAN_MODE_CALLBACK) { atomic_set(&ctx->poll_mode, mode); __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0); + + /* + * In GSI 2.2 and 2.5 there is a limitation that can lead + * to losing an interrupt. 
For these versions an + * explicit check is needed after enabling the interrupt + */ + if (gsi_ctx->per.ver == GSI_VER_2_2 || + gsi_ctx->per.ver == GSI_VER_2_5) { + u32 src = gsi_readl(gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS( + gsi_ctx->per.ee)); + if (src & (1 << ctx->evtr->id)) { + __gsi_config_ieob_irq( + gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + gsi_writel(1 << ctx->evtr->id, gsi_ctx->base + + GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS( + gsi_ctx->per.ee)); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + spin_lock_irqsave(&ctx->ring.slock, flags); + atomic_set( + &ctx->poll_mode, GSI_CHAN_MODE_POLL); + spin_unlock_irqrestore( + &ctx->ring.slock, flags); + ctx->stats.poll_pending_irq++; + return -GSI_STATUS_PENDING_IRQ; + } + } ctx->stats.poll_to_callback++; } spin_unlock_irqrestore(&gsi_ctx->slock, flags); diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h index dbf89418e2fcc74bdc8087786241d7990f71e106..3a0a7694cfe1c992c650325676f01e4e9caa1a70 100644 --- a/drivers/platform/msm/gsi/gsi.h +++ b/drivers/platform/msm/gsi/gsi.h @@ -120,6 +120,7 @@ struct gsi_chan_stats { unsigned long completed; unsigned long callback_to_poll; unsigned long poll_to_callback; + unsigned long poll_pending_irq; unsigned long invalid_tre_error; unsigned long poll_ok; unsigned long poll_empty; diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c index dab8dc29e20b75a3cb763a8eae478dd8efa20901..e3b2fe423852b1f6f39e18b1ea8db5b5546e9944 100644 --- a/drivers/platform/msm/gsi/gsi_dbg.c +++ b/drivers/platform/msm/gsi/gsi_dbg.c @@ -273,9 +273,10 @@ static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx) PRT_STAT("queued=%lu compl=%lu\n", ctx->stats.queued, ctx->stats.completed); - PRT_STAT("cb->poll=%lu poll->cb=%lu\n", + PRT_STAT("cb->poll=%lu poll->cb=%lu poll_pend_irq=%lu\n", ctx->stats.callback_to_poll, - ctx->stats.poll_to_callback); + ctx->stats.poll_to_callback, + ctx->stats.poll_pending_irq); 
PRT_STAT("invalid_tre_error=%lu\n", ctx->stats.invalid_tre_error); PRT_STAT("poll_ok=%lu poll_empty=%lu\n", diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c index 833ab6bff7edaf19658723c2594dcaa742dd45dd..f139d9efdfa143bf6f86d5addaa0f0df7608b272 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c @@ -160,6 +160,7 @@ struct ipa_gsb_context { struct mutex iface_lock[MAX_SUPPORTED_IFACE]; spinlock_t iface_spinlock[MAX_SUPPORTED_IFACE]; u32 pm_hdl; + atomic_t disconnect_in_progress; }; static struct ipa_gsb_context *ipa_gsb_ctx; @@ -922,7 +923,7 @@ static int ipa_gsb_disconnect_sys_pipe(void) int ipa_bridge_disconnect(u32 hdl) { - int ret; + int ret = 0; if (!ipa_gsb_ctx) { IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n"); @@ -937,31 +938,33 @@ int ipa_bridge_disconnect(u32 hdl) IPA_GSB_DBG("client hdl: %d\n", hdl); mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]); + atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 1); + if (!ipa_gsb_ctx->iface[hdl]) { IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl); - mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); - return -EFAULT; + ret = -EFAULT; + goto fail; } if (!ipa_gsb_ctx->iface[hdl]->is_connected) { IPA_GSB_DBG("iface was not connected\n"); - mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); - return 0; + ret = 0; + goto fail; } if (ipa_gsb_ctx->num_connected_iface == 1) { ret = ipa_gsb_disconnect_sys_pipe(); if (ret) { IPA_GSB_ERR("fail to discon pipes\n"); - mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); - return -EFAULT; + ret = -EFAULT; + goto fail; } ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl); if (ret) { IPA_GSB_ERR("failed to deactivate ipa pm\n"); - mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); - return -EFAULT; + ret = -EFAULT; + goto fail; } } @@ -978,8 +981,10 @@ int ipa_bridge_disconnect(u32 hdl) ipa_gsb_ctx->num_resumed_iface); } +fail: + atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 0); 
mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]); - return 0; + return ret; } EXPORT_SYMBOL(ipa_bridge_disconnect); @@ -1156,6 +1161,11 @@ int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb, return -EFAULT; } + if (unlikely(atomic_read(&ipa_gsb_ctx->disconnect_in_progress))) { + IPA_GSB_ERR("ipa bridge disconnect_in_progress\n"); + return -EFAULT; + } + /* make sure skb has enough headroom */ if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) { IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index fc851b4010218d6e0a315d2c8bf354640a3579c9..888f4ed54807a0d0b4156c513f7bef5656466233 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -600,6 +600,73 @@ static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) return 0; } +static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + switch (type) { + case IPA_GSB_CONNECT: + case IPA_GSB_DISCONNECT: + break; + default: + IPAERR("Wrong type given. 
buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_gsb_info *gsb_info; + struct ipa_msg_meta msg_meta; + void *buff; + + IPADBG("type %d\n", msg_type); + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + + if ((msg_type == IPA_GSB_CONNECT) || + (msg_type == IPA_GSB_DISCONNECT)) { + gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info), + GFP_KERNEL); + if (!gsb_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_gsb_info))) { + kfree(gsb_info); + return -EFAULT; + } + + msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info); + buff = gsb_info; + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + retval = ipa3_send_msg(&msg_meta, buff, + ipa3_gsb_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d, msg_type %d\n", + retval, + msg_type); + kfree(buff); + return retval; + } + IPADBG("exit\n"); + + return 0; +} + static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int retval = 0; @@ -1790,6 +1857,22 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa3_resend_wlan_msg(); break; + case IPA_IOC_GSB_CONNECT: + IPADBG("Got IPA_IOC_GSB_CONNECT\n"); + if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GSB_DISCONNECT: + IPADBG("Got IPA_IOC_GSB_DISCONNECT\n"); + if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) { + retval = -EFAULT; + break; + } + break; + default: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; @@ -2701,11 +2784,34 @@ int _ipa_init_sram_v3(void) IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4); ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); - ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4); - ipa3_sram_set_canary(ipa_sram_mmio, 
IPA_MEM_PART(modem_ofst)); + if (ipa_get_hw_type() >= IPA_HW_v4_5) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 12); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 8); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(nat_tbl_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(nat_tbl_ofst)); + } + if (ipa_get_hw_type() >= IPA_HW_v4_0) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(pdn_config_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(pdn_config_ofst)); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(stats_quota_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(stats_quota_ofst)); + } + if (ipa_get_hw_type() <= IPA_HW_v3_5 || + ipa_get_hw_type() >= IPA_HW_v4_5) { + ipa3_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_ofst) - 4); + ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); + } ipa3_sram_set_canary(ipa_sram_mmio, (ipa_get_hw_type() >= IPA_HW_v3_5) ? - IPA_MEM_PART(uc_event_ring_ofst) : + IPA_MEM_PART(uc_descriptor_ram_ofst) : IPA_MEM_PART(end_ofst)); iounmap(ipa_sram_mmio); @@ -6871,6 +6977,44 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in, return 0; } +#define MAX_LEN 96 + +void ipa_pc_qmp_enable(void) +{ + char buf[MAX_LEN] = "{class: bcm, res: ipa_pc, val: 1}"; + struct qmp_pkt pkt; + int ret = 0; + + /* prepare the mailbox struct */ + ipa3_ctx->mbox_client.dev = &ipa3_ctx->master_pdev->dev; + ipa3_ctx->mbox_client.tx_block = true; + ipa3_ctx->mbox_client.tx_tout = MBOX_TOUT_MS; + ipa3_ctx->mbox_client.knows_txdone = false; + + ipa3_ctx->mbox = mbox_request_channel(&ipa3_ctx->mbox_client, 0); + if (IS_ERR(ipa3_ctx->mbox)) { + ret = PTR_ERR(ipa3_ctx->mbox); + if (ret != -EPROBE_DEFER) + IPAERR("mailbox channel request failed, ret=%d\n", ret); + goto cleanup; + } + + /* prepare the QMP packet to send */ + pkt.size = MAX_LEN; + pkt.data = buf; + + /* send the QMP packet to AOP */ + ret = mbox_send_message(ipa3_ctx->mbox, 
&pkt); + if (ret < 0) { + IPAERR("qmp message send failed, ret=%d\n", ret); + goto cleanup; + } + +cleanup: + /* on mbox_request_channel() failure mbox holds an ERR_PTR, never a + * valid channel; free the channel first, then clear the handle + * (the old order NULLed the handle before freeing, leaking the + * channel on every call) + */ + if (!IS_ERR_OR_NULL(ipa3_ctx->mbox)) + mbox_free_channel(ipa3_ctx->mbox); + ipa3_ctx->mbox = NULL; +} + /************************************************************** * PCIe Version *************************************************************/ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 0729d370d527908ebbd0fef1612e6fb05251ff45..b42358d6127ef5a007d3c125748578038dbb4b4a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -77,6 +77,8 @@ const char *ipa3_event_name[] = { __stringify(ADD_BRIDGE_VLAN_MAPPING), __stringify(DEL_BRIDGE_VLAN_MAPPING), __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_GSB_CONNECT), + __stringify(IPA_GSB_DISCONNECT), }; const char *ipa3_hdr_l2_type_name[] = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 56b51e13187a7ac452e5e8517ee098671422933f..ee97e9d2a4b12b5bf6ca29c225fe47d6e830c7bb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -294,6 +294,7 @@ int ipa3_send(struct ipa3_sys_context *sys, u32 mem_flag = GFP_ATOMIC; const struct ipa_gsi_ep_config *gsi_ep_cfg; bool send_nop = false; + unsigned int max_desc; if (unlikely(!in_atomic)) mem_flag = GFP_KERNEL; @@ -311,14 +312,18 @@ int ipa3_send(struct ipa3_sys_context *sys, return -EPERM; } - if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) { + max_desc = gsi_ep_cfg->ipa_if_tlv; + if (gsi_ep_cfg->prefetch_mode == GSI_SMART_PRE_FETCH || + gsi_ep_cfg->prefetch_mode == GSI_FREE_PRE_FETCH) + max_desc -= gsi_ep_cfg->prefetch_threshold; + + if (unlikely(num_desc > max_desc)) { IPAERR("Too many chained descriptors need=%d max=%d\n", - num_desc, gsi_ep_cfg->ipa_if_tlv); + num_desc, max_desc); WARN_ON(1); return -EPERM; } - /* initialize only the xfers we use */ memset(gsi_xfer, 0, 
sizeof(gsi_xfer[0]) * num_desc); @@ -747,27 +752,24 @@ static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all, /** * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode */ -static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys) +static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys) { int ret; - if (!atomic_read(&sys->curr_polling_state)) { - IPAERR("already in intr mode\n"); - goto fail; - } atomic_set(&sys->curr_polling_state, 0); ipa3_dec_release_wakelock(); ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl, GSI_CHAN_MODE_CALLBACK); if (ret != GSI_STATUS_SUCCESS) { - IPAERR("Failed to switch to intr mode.\n"); - goto fail; + if (ret == -GSI_STATUS_PENDING_IRQ) { + ipa3_inc_acquire_wakelock(); + atomic_set(&sys->curr_polling_state, 1); + } else { + IPAERR("Failed to switch to intr mode.\n"); + } } - return; -fail: - queue_delayed_work(sys->wq, &sys->switch_to_intr_work, - msecs_to_jiffies(1)); + return ret; } /** @@ -780,13 +782,16 @@ static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys) */ static void ipa3_handle_rx(struct ipa3_sys_context *sys) { - int inactive_cycles = 0; + int inactive_cycles; int cnt; + int ret; if (ipa3_ctx->use_ipa_pm) ipa_pm_activate_sync(sys->pm_hdl); else IPA_ACTIVE_CLIENTS_INC_SIMPLE(); +start_poll: + inactive_cycles = 0; do { cnt = ipa3_handle_rx_core(sys, true, true); if (cnt == 0) @@ -809,7 +814,10 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys) } while (inactive_cycles <= POLLING_INACTIVITY_RX); trace_poll_to_intr3(sys->ep->client); - ipa3_rx_switch_to_intr_mode(sys); + ret = ipa3_rx_switch_to_intr_mode(sys); + if (ret == -GSI_STATUS_PENDING_IRQ) + goto start_poll; + if (ipa3_ctx->use_ipa_pm) ipa_pm_deferred_deactivate(sys->pm_hdl); else @@ -824,7 +832,7 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work) dwork = container_of(work, struct delayed_work, work); sys = container_of(dwork, struct 
ipa3_sys_context, switch_to_intr_work); - if (sys->ep->napi_enabled) { + if (sys->napi_obj) { /* interrupt mode is done in ipa3_rx_poll context */ ipa_assert(); } else @@ -999,7 +1007,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) ep->valid = 1; ep->client = sys_in->client; ep->client_notify = sys_in->notify; - ep->napi_enabled = sys_in->napi_enabled; + ep->sys->napi_obj = sys_in->napi_obj; ep->priv = sys_in->priv; ep->keep_ipa_awake = sys_in->keep_ipa_awake; atomic_set(&ep->avail_fifo_desc, @@ -1163,7 +1171,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) return result; } - if (ep->napi_enabled) { + if (ep->sys->napi_obj) { do { usleep_range(95, 105); } while (atomic_read(&ep->sys->curr_polling_state)); @@ -1302,6 +1310,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, int num_frags, f; const struct ipa_gsi_ep_config *gsi_ep; int data_idx; + unsigned int max_desc; if (unlikely(!ipa3_ctx)) { IPAERR("IPA3 driver was not initialized\n"); @@ -1356,7 +1365,15 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, * 1 descriptor needed for the linear portion of skb. 
*/ gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client); - if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) { + if (unlikely(gsi_ep == NULL)) { + IPAERR("failed to get EP %d GSI info\n", src_ep_idx); + goto fail_gen; + } + max_desc = gsi_ep->ipa_if_tlv; + if (gsi_ep->prefetch_mode == GSI_SMART_PRE_FETCH || + gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH) + max_desc -= gsi_ep->prefetch_threshold; + if (num_frags + 3 > max_desc) { if (skb_linearize(skb)) { IPAERR("Failed to linear skb with %d frags\n", num_frags); @@ -1509,11 +1526,12 @@ static void ipa3_wq_handle_rx(struct work_struct *work) sys = container_of(work, struct ipa3_sys_context, work); - if (sys->ep->napi_enabled) { + if (sys->napi_obj) { if (!ipa3_ctx->use_ipa_pm) IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI"); - sys->ep->client_notify(sys->ep->priv, - IPA_CLIENT_START_POLL, 0); + else + ipa_pm_activate_sync(sys->pm_hdl); + napi_schedule(sys->napi_obj); } else ipa3_handle_rx(sys); } @@ -1667,7 +1685,7 @@ static void ipa3_cleanup_wlan_rx_common_cache(void) &ipa3_ctx->wc_memb.wlan_comm_desc_list, link) { list_del(&rx_pkt->link); dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr, - IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE); + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_pkt->data.skb); kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); ipa3_ctx->wc_memb.wlan_comm_free_cnt--; @@ -1824,6 +1842,17 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys) idx = 0; } } + goto done; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +done: if (idx) { ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, gsi_xfer_elem_array, true); @@ -1834,18 +1863,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys) IPAERR("failed to provide buffer: %d\n", ret); 
WARN_ON(1); } - idx = 0; } - return; - -fail_dma_mapping: - sys->free_skb(rx_pkt->data.skb); -fail_skb_alloc: - kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt); -fail_kmem_cache_alloc: - if (rx_len_cached == 0) - queue_delayed_work(sys->wq, &sys->replenish_rx_work, - msecs_to_jiffies(1)); } static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys) @@ -1937,6 +1955,17 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys) idx = 0; } } + goto done; +fail_dma_mapping: + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->rcycl_list); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +done: if (idx) { ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx, gsi_xfer_elem_array, true); @@ -1947,18 +1976,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys) IPAERR("failed to provide buffer: %d\n", ret); WARN_ON(1); } - idx = 0; } - return; -fail_dma_mapping: - spin_lock_bh(&sys->spinlock); - list_add_tail(&rx_pkt->link, &sys->rcycl_list); - INIT_LIST_HEAD(&rx_pkt->link); - spin_unlock_bh(&sys->spinlock); -fail_kmem_cache_alloc: - if (rx_len_cached == 0) - queue_delayed_work(sys->wq, &sys->replenish_rx_work, - msecs_to_jiffies(1)); } static inline void __trigger_repl_work(struct ipa3_sys_context *sys) @@ -2660,7 +2678,7 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data) if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes || !ep->valid || !ep->client_notify)) { - IPAERR("drop pipe=%d ep_valid=%d client_notify=%pK\n", + IPAERR_RL("drop pipe=%d ep_valid=%d client_notify=%pK\n", src_pipe, ep->valid, ep->client_notify); dev_kfree_skb_any(rx_skb); return; @@ -2972,7 +2990,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in, sys->repl_hdlr = ipa3_replenish_rx_cache; } - if (in->napi_enabled && in->recycle_enabled) + 
if (in->napi_obj && in->recycle_enabled) sys->repl_hdlr = ipa3_replenish_rx_cache_recycle; in->ipa_ep_cfg.aggr.aggr_sw_eof_active @@ -3250,7 +3268,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src, desc[1].callback = ipa3_tx_client_rx_pkt_status; } - IPADBG_LOW("calling ipa3_send_one()\n"); + IPADBG_LOW("calling ipa3_send()\n"); if (ipa3_send(sys, 2, desc, true)) { IPAERR("fail to send skb\n"); sys->ep->wstats.rx_pkt_leak += (cnt-1); @@ -3540,24 +3558,22 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys) */ if (ipa3_ctx->use_ipa_pm) { clk_off = ipa_pm_activate(sys->pm_hdl); - if (!clk_off && sys->ep->napi_enabled) { - sys->ep->client_notify(sys->ep->priv, - IPA_CLIENT_START_POLL, 0); + if (!clk_off && sys->napi_obj) { + napi_schedule(sys->napi_obj); return; } queue_work(sys->wq, &sys->work); return; } - if (sys->ep->napi_enabled) { + if (sys->napi_obj) { struct ipa_active_client_logging_info log; IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI"); clk_off = ipa3_inc_client_enable_clks_no_block( &log); if (!clk_off) { - sys->ep->client_notify(sys->ep->priv, - IPA_CLIENT_START_POLL, 0); + napi_schedule(sys->napi_obj); return; } } @@ -3759,7 +3775,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, ep->gsi_mem_info.evt_ring_base_vaddr = gsi_evt_ring_props.ring_base_vaddr; - if (ep->napi_enabled) { + if (ep->sys->napi_obj) { gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT; gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC; } else { @@ -4046,6 +4062,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight) } ep = &ipa3_ctx->ep[clnt_hdl]; +start_poll: while (remain_aggr_weight > 0 && atomic_read(&ep->sys->curr_polling_state)) { atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); @@ -4072,8 +4089,12 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight) } cnt += weight - remain_aggr_weight * IPA_WAN_AGGR_PKT_CNT; if (cnt < weight) { - ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0); - ipa3_rx_switch_to_intr_mode(ep->sys); + 
napi_complete(ep->sys->napi_obj); + ret = ipa3_rx_switch_to_intr_mode(ep->sys); + if (ret == -GSI_STATUS_PENDING_IRQ && + napi_reschedule(ep->sys->napi_obj)) + goto start_poll; + if (ipa3_ctx->use_ipa_pm) ipa_pm_deferred_deactivate(ep->sys->pm_hdl); else diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index ad18e4cd642663b6b16991b904dc40a6f27e9c47..28038d2ea965a68813bd9015cb7c3e3fda59edd5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -36,6 +36,8 @@ #include "../ipa_common_i.h" #include "ipa_uc_offload_i.h" #include "ipa_pm.h" +#include +#include #define IPA_DEV_NAME_MAX_LEN 15 #define DRV_NAME "ipa" @@ -407,6 +409,8 @@ enum { #define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 #define TZ_MEM_PROTECT_REGION_ID 0x10 +#define MBOX_TOUT_MS 100 + struct ipa3_active_client_htable_entry { struct hlist_node list; char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; @@ -761,7 +765,6 @@ struct ipa3_status_stats { * @disconnect_in_progress: Indicates client disconnect in progress. * @qmi_request_sent: Indicates whether QMI request to enable clear data path * request is sent or not. 
- * @napi_enabled: when true, IPA call client callback to start polling * @client_lock_unlock: callback function to take mutex lock/unlock for USB * clients */ @@ -793,7 +796,6 @@ struct ipa3_ep_context { u32 gsi_offload_state; bool disconnect_in_progress; u32 qmi_request_sent; - bool napi_enabled; u32 eot_in_poll_err; bool ep_delay_set; @@ -882,6 +884,7 @@ struct ipa3_sys_context { void (*repl_hdlr)(struct ipa3_sys_context *sys); struct ipa3_repl_ctx repl; u32 pkt_sent; + struct napi_struct *napi_obj; /* ordering is important - mutable fields go above */ struct ipa3_ep_context *ep; @@ -1649,6 +1652,8 @@ struct ipa3_context { bool vlan_mode_iface[IPA_VLAN_IF_MAX]; bool wdi_over_pcie; struct ipa3_wdi2_ctx wdi2_ctx; + struct mbox_client mbox_client; + struct mbox_chan *mbox; }; struct ipa3_plat_drv_res { @@ -1753,6 +1758,8 @@ struct ipa3_plat_drv_res { * +-------------------------+ * | MODEM HDR | * +-------------------------+ + * | APPS HDR (IPA4.5) | + * +-------------------------+ * | CANARY | * +-------------------------+ * | CANARY | @@ -1765,65 +1772,49 @@ struct ipa3_plat_drv_res { * +-------------------------+ * | CANARY | * +-------------------------+ - * | PDN CONFIG | - * +-------------------------+ - * | CANARY | - * +-------------------------+ - * | CANARY | - * +-------------------------+ - * | QUOTA STATS | - * +-------------------------+ - * | CANARY | - * +-------------------------+ - * | CANARY | - * +-------------------------+ - * | TETH STATS | - * +-------------------------+ - * | CANARY | - * +-------------------------+ - * | CANARY | + * | CANARY (IPA4.5) | * +-------------------------+ - * | V4 FLT STATS | + * | CANARY (IPA4.5) | * +-------------------------+ - * | CANARY | + * | NAT TABLE (IPA4.5) | * +-------------------------+ - * | CANARY | + * | NAT IDX TABLE (IPA4.5) | * +-------------------------+ - * | V6 FLT STATS | + * | NAT EXP TABLE (IPA4.5) | * +-------------------------+ - * | CANARY | + * | CANARY (IPA4.5) | * 
+-------------------------+ - * | CANARY | + * | CANARY (IPA4.5) | * +-------------------------+ - * | V4 RT STATS | + * | PDN CONFIG | * +-------------------------+ * | CANARY | * +-------------------------+ * | CANARY | * +-------------------------+ - * | V6 RT STATS | + * | QUOTA STATS | * +-------------------------+ - * | CANARY | + * | TETH STATS | * +-------------------------+ - * | CANARY | + * | FnR STATS | * +-------------------------+ * | DROP STATS | * +-------------------------+ - * | CANARY | + * | CANARY (IPA4.5) | * +-------------------------+ - * | CANARY | + * | CANARY (IPA4.5) | * +-------------------------+ - * | MODEM MEM | + * | MODEM MEM | * +-------------------------+ - * | CANARY | + * | Dummy (IPA4.5) | + * +-------------------------+ + * | CANARY (IPA4.5) | * +-------------------------+ - * | UC EVENT RING | From IPA 3.5 + * | UC DESC RAM (IPA3.5) | * +-------------------------+ */ struct ipa3_mem_partition { u32 ofst_start; - u32 nat_ofst; - u32 nat_size; u32 v4_flt_hash_ofst; u32 v4_flt_hash_size; u32 v4_flt_hash_size_ddr; @@ -1868,6 +1859,12 @@ struct ipa3_mem_partition { u32 apps_hdr_proc_ctx_ofst; u32 apps_hdr_proc_ctx_size; u32 apps_hdr_proc_ctx_size_ddr; + u32 nat_tbl_ofst; + u32 nat_tbl_size; + u32 nat_index_tbl_ofst; + u32 nat_index_tbl_size; + u32 nat_exp_tbl_ofst; + u32 nat_exp_tbl_size; u32 modem_comp_decomp_ofst; u32 modem_comp_decomp_size; u32 modem_ofst; @@ -1891,14 +1888,18 @@ struct ipa3_mem_partition { u32 apps_v6_rt_hash_size; u32 apps_v6_rt_nhash_ofst; u32 apps_v6_rt_nhash_size; - u32 uc_event_ring_ofst; - u32 uc_event_ring_size; + u32 uc_descriptor_ram_ofst; + u32 uc_descriptor_ram_size; u32 pdn_config_ofst; u32 pdn_config_size; u32 stats_quota_ofst; u32 stats_quota_size; u32 stats_tethering_ofst; u32 stats_tethering_size; + u32 stats_fnr_ofst; + u32 stats_fnr_size; + + /* Irrelevant starting IPA4.5 */ u32 stats_flt_v4_ofst; u32 stats_flt_v4_size; u32 stats_flt_v6_ofst; @@ -1907,6 +1908,7 @@ struct ipa3_mem_partition { 
u32 stats_rt_v4_size; u32 stats_rt_v6_ofst; u32 stats_rt_v6_size; + u32 stats_drop_ofst; u32 stats_drop_size; }; @@ -2653,4 +2655,5 @@ int ipa3_get_transport_info( phys_addr_t *phys_addr_ptr, unsigned long *size_ptr); irq_handler_t ipa3_get_isr(void); +void ipa_pc_qmp_enable(void); #endif /* _IPA3_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c index 9001b38cc5ec82c053e908e38eac8d7ec8d319c8..48a0bb99b5159fcc66aaf548a0d73e6f19d858d9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c @@ -477,12 +477,15 @@ struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( IMP_FUNC_ENTRY(); + mutex_lock(&imp_ctx->mutex); + memset(resp, 0, sizeof(*resp)); if (imp_ctx->state != IMP_READY) { IMP_ERR("invalid state %d\n", imp_ctx->state); resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01; + mutex_unlock(&imp_ctx->mutex); return resp; } @@ -493,6 +496,7 @@ struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( IMP_ERR("invalid tr_info_arr_len %d\n", req->tr_info_arr_len); resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; resp->resp.error = IPA_QMI_ERR_NO_MEMORY_V01; + mutex_unlock(&imp_ctx->mutex); return resp; } @@ -502,11 +506,10 @@ struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req( IMP_ERR("no mapping provided, but smmu is enabled\n"); resp->resp.result = IPA_QMI_RESULT_FAILURE_V01; resp->resp.error = IPA_QMI_ERR_INTERNAL_V01; + mutex_unlock(&imp_ctx->mutex); return resp; } - mutex_lock(&imp_ctx->mutex); - if (imp_ctx->dev_info.smmu_enabled) { /* map CTRL */ __map_smmu_info(imp_ctx->md.mhi_dev->dev.parent, @@ -613,21 +616,31 @@ int imp_handle_vote_req(bool vote) mutex_unlock(&imp_ctx->mutex); return -EPERM; } + mutex_unlock(&imp_ctx->mutex); + /* + * Unlock the mutex before calling into mhi for clock vote + * to avoid deadlock on imp 
mutex. + * Calls into mhi are synchronous and imp callbacks are + * executed from mhi context. + */ if (vote) { ret = mhi_device_get_sync(imp_ctx->md.mhi_dev); if (ret) { IMP_ERR("mhi_sync_get failed %d\n", ret); - mutex_unlock(&imp_ctx->mutex); return ret; } - imp_ctx->lpm_disabled = true; } else { mhi_device_put(imp_ctx->md.mhi_dev); - imp_ctx->lpm_disabled = false; } + mutex_lock(&imp_ctx->mutex); + if (vote) + imp_ctx->lpm_disabled = true; + else + imp_ctx->lpm_disabled = false; mutex_unlock(&imp_ctx->mutex); + return 0; } @@ -925,8 +938,11 @@ static int imp_remove(struct platform_device *pdev) IMP_FUNC_ENTRY(); mhi_driver_unregister(&mhi_driver); mutex_lock(&imp_ctx->mutex); - if (!imp_ctx->in_lpm) + if (!imp_ctx->in_lpm && (imp_ctx->state == IMP_READY || + imp_ctx->state == IMP_STARTED)) { + IMP_DBG("devote IMP with state= %d\n", imp_ctx->state); IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IMP"); + } imp_ctx->in_lpm = false; imp_ctx->lpm_disabled = false; @@ -992,6 +1008,9 @@ void imp_handle_modem_shutdown(void) { IMP_FUNC_ENTRY(); + if (!imp_ctx) + return; + mutex_lock(&imp_ctx->mutex); if (imp_ctx->state == IMP_INVALID) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c index f6772a960d7ea63ef70a208b7bc3582878ba55fc..a6d8ab273f768c8f05d4c6254659bd444558edf0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -255,6 +255,7 @@ static int ipa3_nat_ipv6ct_init_device( dev->smem_offset = smem_offset; dev->is_dev_init = true; + dev->tmp_mem = tmp_mem; mutex_unlock(&dev->lock); IPADBG("ipa dev %s added successful. 
major:%d minor:%d\n", name, @@ -1086,26 +1087,32 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn) struct ipahal_imm_cmd_pyld *cmd_pyld; int result = 0; struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem); - struct ipa_pdn_entry *pdn_entries = nat_ctx->pdn_mem.base; + struct ipa_pdn_entry *pdn_entries = NULL; IPADBG("\n"); + mutex_lock(&nat_ctx->dev.lock); + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { IPAERR_RL("IPA HW does not support multi PDN\n"); - return -EPERM; + result = -EPERM; + goto bail; } + if (!nat_ctx->dev.is_mem_allocated) { IPAERR_RL( "attempt to modify a PDN entry before the PDN table memory allocation\n"); - return -EPERM; + result = -EPERM; + goto bail; } if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) { IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index); - return -EPERM; + result = -EPERM; + goto bail; } - mutex_lock(&nat_ctx->dev.lock); + pdn_entries = nat_ctx->pdn_mem.base; /* store ip in pdn entries cache array */ pdn_entries[mdfy_pdn->pdn_index].public_ip = diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c index c524db5f00db57ccfd8bbf6338025f7309886d96..7d3a008ef7c95133f1cde17f51be88f69cfe54f1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c @@ -14,6 +14,7 @@ #include "ipa_odl.h" #include #include +#include struct ipa_odl_context *ipa3_odl_ctx; @@ -96,62 +97,67 @@ static ssize_t ipa_odl_ctl_fops_read(struct file *filp, char __user *buf, bool new_state = false; start = buf; - while (1) { - wait_event_interruptible(odl_ctl_msg_wq, - ipa3_odl_ctx->odl_ctl_msg_wq_flag == true); - ipa3_odl_ctx->odl_ctl_msg_wq_flag = false; - if (!ipa3_odl_ctx->odl_state.adpl_open && - !ipa3_odl_ctx->odl_state.odl_disconnected) - break; + ipa3_odl_ctx->odl_ctl_msg_wq_flag = false; - if (ipa3_odl_ctx->odl_state.odl_ep_setup) - new_state = true; - else if (ipa3_odl_ctx->odl_state.odl_disconnected) - new_state = false; - else { - ret = 
-EAGAIN; - break; - } - - if (old_state != new_state) { - old_state = new_state; + if (!ipa3_odl_ctx->odl_state.adpl_open && + !ipa3_odl_ctx->odl_state.odl_disconnected) { + IPADBG("Failed to send data odl pipe already disconnected\n"); + ret = -EFAULT; + goto send_failed; + } - if (new_state == true) - data = 1; - else if (new_state == false) - data = 0; + if (ipa3_odl_ctx->odl_state.odl_ep_setup) + new_state = true; + else if (ipa3_odl_ctx->odl_state.odl_disconnected) + new_state = false; + else { + IPADBG("Failed to send data odl already running\n"); + ret = -EFAULT; + goto send_failed; + } - if (copy_to_user(buf, &data, - sizeof(data))) { - ret = -EFAULT; - break; - } + if (old_state != new_state) { + old_state = new_state; - buf += sizeof(data); + if (new_state == true) + data = 1; + else if (new_state == false) + data = 0; - if (data == 1) - ipa3_odl_ctx->odl_state.odl_setup_done_sent = - true; + if (copy_to_user(buf, &data, + sizeof(data))) { + IPADBG("Cpoying data to user failed\n"); + ret = -EFAULT; + goto send_failed; } - ret = -EAGAIN; - if (filp->f_flags & O_NONBLOCK) - break; + buf += sizeof(data); - ret = -EINTR; - if (signal_pending(current)) - break; - - if (start != buf) - break; + if (data == 1) + ipa3_odl_ctx->odl_state.odl_setup_done_sent = + true; } + if (start != buf && ret != -EFAULT) ret = buf - start; - +send_failed: return ret; } +static unsigned int ipa_odl_ctl_fops_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + + poll_wait(file, &odl_ctl_msg_wq, wait); + + if (ipa3_odl_ctx->odl_ctl_msg_wq_flag == true) { + IPADBG("Sending read mask to odl control pipe\n"); + mask |= POLLIN | POLLRDNORM; + } + return mask; +} + static long ipa_odl_ctl_fops_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -309,7 +315,7 @@ int ipa_setup_odl_pipe(void) ipa_odl_ep_cfg->client = IPA_CLIENT_ODL_DPL_CONS; ipa_odl_ep_cfg->notify = odl_ipa_packet_receive_notify; - ipa_odl_ep_cfg->napi_enabled = false; + 
ipa_odl_ep_cfg->napi_obj = NULL; ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE * IPA_FIFO_ELEMENT_SIZE; @@ -565,6 +571,7 @@ static const struct file_operations ipa_odl_ctl_fops = { .release = ipa_odl_ctl_fops_release, .read = ipa_odl_ctl_fops_read, .unlocked_ioctl = ipa_odl_ctl_fops_ioctl, + .poll = ipa_odl_ctl_fops_poll, }; static const struct file_operations ipa_adpl_fops = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 3c3f35ee9e3d81a296c0d089eedbc229027f7039..c8dafd05e5d6e1834ad513549117762eb4754ebb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -1162,9 +1162,11 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) static void ipa3_q6_clnt_svc_exit(struct work_struct *work) { - ipa3_qmi_ctx->server_sq.sq_family = 0; - ipa3_qmi_ctx->server_sq.sq_node = 0; - ipa3_qmi_ctx->server_sq.sq_port = 0; + if (ipa3_qmi_ctx != NULL) { + ipa3_qmi_ctx->server_sq.sq_family = 0; + ipa3_qmi_ctx->server_sq.sq_node = 0; + ipa3_qmi_ctx->server_sq.sq_port = 0; + } } static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, @@ -1174,10 +1176,11 @@ static int ipa3_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, service->service, service->version, service->instance, service->node, service->port); - ipa3_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; - ipa3_qmi_ctx->server_sq.sq_node = service->node; - ipa3_qmi_ctx->server_sq.sq_port = service->port; - + if (ipa3_qmi_ctx != NULL) { + ipa3_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; + ipa3_qmi_ctx->server_sq.sq_node = service->node; + ipa3_qmi_ctx->server_sq.sq_port = service->port; + } if (!workqueues_stopped) { queue_delayed_work(ipa_clnt_req_workqueue, &ipa3_work_svc_arrive, 0); @@ -1315,8 +1318,10 @@ static void ipa3_qmi_service_init_worker(struct work_struct *work) /* start the QMI msg cache */ ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx)); - if 
(!ipa3_qmi_ctx) + if (!ipa3_qmi_ctx) { + IPAWANERR("Failed to allocate the memory to ipa3_qmi_ctx\n"); return; + } ipa3_qmi_ctx->modem_cfg_emb_pipe_flt = ipa3_get_modem_cfg_emb_pipe_flt(); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 4775351573a34b8072dc08abcb574fb7855aab86..6191811c4ff9b90d94369502bbd89e972440af96 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -1061,13 +1061,12 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, goto error; } /* - * do not allow any rules to be added at end of the "default" routing - * tables + * do not allow any rule to be added at "default" routing + * table */ if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && - (tbl->rule_cnt > 0) && (at_rear != 0)) { - IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d" - , tbl->rule_cnt, at_rear); + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); goto error; } @@ -1297,13 +1296,12 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) } /* - * do not allow any rules to be added at end of the "default" routing - * tables + * do not allow any rule to be added at "default" routing + * table */ if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && - (&entry->link == tbl->head_rt_rule_list.prev)) { - IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n", - tbl->rule_cnt); + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); ret = -EINVAL; goto bail; } @@ -1513,6 +1511,8 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) struct ipa3_rt_entry *rule; struct ipa3_rt_entry *rule_next; struct ipa3_rt_tbl_set *rset; + struct ipa3_hdr_entry *hdr_entry; + struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; u32 apps_start_idx; int id; bool tbl_user = false; @@ -1566,6 +1566,27 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) if (!user_only || rule->ipacm_installed) { 
list_del(&rule->link); + if (rule->hdr) { + hdr_entry = ipa3_id_find( + rule->rule.hdr_hdl); + if (!hdr_entry || + hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL( + "Header already deleted\n"); + return -EINVAL; + } + } else if (rule->proc_ctx) { + hdr_proc_entry = + ipa3_id_find( + rule->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != + IPA_PROC_HDR_COOKIE) { + IPAERR_RL( + "Proc entry already deleted\n"); + return -EINVAL; + } + } tbl->rule_cnt--; if (rule->hdr) __ipa3_release_hdr(rule->hdr->id); @@ -1573,7 +1594,9 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) __ipa3_release_hdr_proc_ctx( rule->proc_ctx->id); rule->cookie = 0; - idr_remove(tbl->rule_ids, rule->rule_id); + if (!rule->rule_id_valid) + idr_remove(tbl->rule_ids, + rule->rule_id); id = rule->id; kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); @@ -1766,6 +1789,10 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) goto error; } + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPAERR_RL("Default tbl rule cannot be modified\n"); + return -EINVAL; + } /* Adding check to confirm still * header entry present in header table or not */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c index 21820b0c4f96fd2f454ef2c355ba9d3bfea4d152..5178354cf1e9f1bce03ee321bba4de4ee6b75cb2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -1863,6 +1863,10 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl) result); goto fail_dealloc_channel; } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + result = ipa3_release_gsi_channel(clnt_hdl); if (result) { IPAERR("GSI dealloc channel failed %d\n", @@ -1871,11 +1875,6 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl) } ipa_release_uc_smmu_mappings(clnt_hdl); - memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); - - if (!ep->keep_ipa_awake) - 
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); - /* for AP+STA stats update */ if (ipa3_ctx->uc_wdi_ctx.stats_notify) ipa3_ctx->uc_wdi_ctx.stats_notify = NULL; @@ -1942,8 +1941,8 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl) ipa3_delete_dflt_flt_rules(clnt_hdl); ipa_release_uc_smmu_mappings(ep->client); - memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); + memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context)); IPADBG("client (ep: %d) disconnected\n", clnt_hdl); @@ -2542,6 +2541,9 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) struct ipa3_ep_context *ep; union __packed gsi_channel_scratch gsi_scratch; int retry_cnt = 0; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; memset(&gsi_scratch, 0, sizeof(gsi_scratch)); ep = &ipa3_ctx->ep[clnt_hdl]; @@ -2555,6 +2557,25 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) } if (ep->gsi_offload_state == (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | IPA_WDI_RESUMED)) { + source_pipe_bitmask = 1 << + ipa3_get_ep_mapping(ep->client); + result = ipa3_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (result) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", result); + IPAERR("remove delay from SCND reg\n"); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } else { + disable_force_clear = true; + } + retry_gsi_stop: result = ipa3_stop_gsi_channel(clnt_hdl); if (result != 0 && result != -GSI_STATUS_AGAIN && @@ -2589,6 +2610,11 @@ int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id) goto fail_start_channel; } } + + if (disable_force_clear) + ipa3_disable_force_clear(clnt_hdl); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); return 0; 
fail_start_channel: fail_read_channel_scratch: diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index bde01ad928ee3ac95a0fd15e3a25a008f118687c..62c8fd06d668ef09eaada5338f538dcc85d3b96e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2059,7 +2059,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 11, 14, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + { 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } }, [IPA_4_5][IPA_CLIENT_APPS_WAN_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, @@ -2071,13 +2071,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, QMB_MASTER_SELECT_DDR, - { 7, 9, 20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + { 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5][IPA_CLIENT_ODU_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, - { 1, 0, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, + { 1, 0, 16, 20, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5][IPA_CLIENT_ETHERNET_PROD] = { true, IPA_v4_5_GROUP_UL_DL, true, @@ -2101,7 +2101,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 8, 2, 24, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 5 } }, + { 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } }, /* Only for test purpose */ [IPA_4_5][IPA_CLIENT_TEST_PROD] = { true, IPA_v4_5_GROUP_UL_DL, @@ -2252,7 +2252,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping false, IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, QMB_MASTER_SELECT_DDR, - { 7, 9, 20, 24, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } }, + { 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } }, [IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = { true, IPA_v4_5_MHI_GROUP_DDR, true, @@ -2270,13 
+2270,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping true, IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP, QMB_MASTER_SELECT_DDR, - { 8, 2, 24, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 5 } }, + { 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } }, [IPA_4_5_MHI][IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD] = { true, IPA_v4_5_MHI_GROUP_DMA, false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, QMB_MASTER_SELECT_DDR, - { 4, 6, 8, 16, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } }, + { 4, 6, 8, 16, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 4 } }, [IPA_4_5_MHI][IPA_CLIENT_MHI_PROD] = { true, IPA_v4_5_MHI_GROUP_PCIE, true, @@ -2380,9 +2380,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping }; static struct ipa3_mem_partition ipa_4_1_mem_part = { - .ofst_start = 0x280, - .nat_ofst = 0x0, - .nat_size = 0x0, + .ofst_start = 0x280, .v4_flt_hash_ofst = 0x288, .v4_flt_hash_size = 0x78, .v4_flt_hash_size_ddr = 0x4000, @@ -2450,8 +2448,8 @@ static struct ipa3_mem_partition ipa_4_1_mem_part = { .apps_v6_rt_hash_size = 0x0, .apps_v6_rt_nhash_ofst = 0x23fc, .apps_v6_rt_nhash_size = 0x0, - .uc_event_ring_ofst = 0x2400, - .uc_event_ring_size = 0x400, + .uc_descriptor_ram_ofst = 0x2400, + .uc_descriptor_ram_size = 0x400, .pdn_config_ofst = 0xbd8, .pdn_config_size = 0x50, .stats_quota_ofst = 0xc30, @@ -2471,9 +2469,7 @@ static struct ipa3_mem_partition ipa_4_1_mem_part = { }; static struct ipa3_mem_partition ipa_4_2_mem_part = { - .ofst_start = 0x280, - .nat_ofst = 0x0, - .nat_size = 0x0, + .ofst_start = 0x280, .v4_flt_hash_ofst = 0x288, .v4_flt_hash_size = 0x0, .v4_flt_hash_size_ddr = 0x0, @@ -2541,8 +2537,8 @@ static struct ipa3_mem_partition ipa_4_2_mem_part = { .apps_v6_rt_hash_size = 0x0, .apps_v6_rt_nhash_ofst = 0x1bfc, .apps_v6_rt_nhash_size = 0x0, - .uc_event_ring_ofst = 0x1c00, - .uc_event_ring_size = 0x400, + .uc_descriptor_ram_ofst = 0x1c00, + .uc_descriptor_ram_size = 0x400, .pdn_config_ofst = 0x9F8, .pdn_config_size = 0x50, .stats_quota_ofst = 0xa50, @@ -2561,6 +2557,104 @@ 
static struct ipa3_mem_partition ipa_4_2_mem_part = { .stats_drop_size = 0x0, }; +static struct ipa3_mem_partition ipa_4_5_mem_part = { + .uc_info_ofst = 0x80, + .uc_info_size = 0x200, + .ofst_start = 0x280, + .v4_flt_hash_ofst = 0x288, + .v4_flt_hash_size = 0x78, + .v4_flt_hash_size_ddr = 0x4000, + .v4_flt_nhash_ofst = 0x308, + .v4_flt_nhash_size = 0x78, + .v4_flt_nhash_size_ddr = 0x4000, + .v6_flt_hash_ofst = 0x388, + .v6_flt_hash_size = 0x78, + .v6_flt_hash_size_ddr = 0x4000, + .v6_flt_nhash_ofst = 0x408, + .v6_flt_nhash_size = 0x78, + .v6_flt_nhash_size_ddr = 0x4000, + .v4_rt_num_index = 0xf, + .v4_modem_rt_index_lo = 0x0, + .v4_modem_rt_index_hi = 0x7, + .v4_apps_rt_index_lo = 0x8, + .v4_apps_rt_index_hi = 0xe, + .v4_rt_hash_ofst = 0x488, + .v4_rt_hash_size = 0x78, + .v4_rt_hash_size_ddr = 0x4000, + .v4_rt_nhash_ofst = 0x508, + .v4_rt_nhash_size = 0x78, + .v4_rt_nhash_size_ddr = 0x4000, + .v6_rt_num_index = 0xf, + .v6_modem_rt_index_lo = 0x0, + .v6_modem_rt_index_hi = 0x7, + .v6_apps_rt_index_lo = 0x8, + .v6_apps_rt_index_hi = 0xe, + .v6_rt_hash_ofst = 0x588, + .v6_rt_hash_size = 0x78, + .v6_rt_hash_size_ddr = 0x4000, + .v6_rt_nhash_ofst = 0x608, + .v6_rt_nhash_size = 0x78, + .v6_rt_nhash_size_ddr = 0x4000, + .modem_hdr_ofst = 0x688, + .modem_hdr_size = 0x240, + .apps_hdr_ofst = 0x8c8, + .apps_hdr_size = 0x200, + .apps_hdr_size_ddr = 0x800, + .modem_hdr_proc_ctx_ofst = 0xad0, + .modem_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_ofst = 0xcd0, + .apps_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_size_ddr = 0x0, + .nat_tbl_ofst = 0xee0, + .nat_tbl_size = 0x800, + .nat_index_tbl_ofst = 0x16e0, + .nat_index_tbl_size = 0x100, + .nat_exp_tbl_ofst = 0x17e0, + .nat_exp_tbl_size = 0x400, + .pdn_config_ofst = 0x1be8, + .pdn_config_size = 0x50, + .stats_quota_ofst = 0x1c40, + .stats_quota_size = 0x78, + .stats_tethering_ofst = 0x1cb8, + .stats_tethering_size = 0x238, + .stats_flt_v4_ofst = 0, + .stats_flt_v4_size = 0, + .stats_flt_v6_ofst = 0, + .stats_flt_v6_size 
= 0, + .stats_rt_v4_ofst = 0, + .stats_rt_v4_size = 0, + .stats_rt_v6_ofst = 0, + .stats_rt_v6_size = 0, + .stats_fnr_ofst = 0x1ef0, + .stats_fnr_size = 0x800, + .stats_drop_ofst = 0x26f0, + .stats_drop_size = 0x20, + .modem_comp_decomp_ofst = 0x0, + .modem_comp_decomp_size = 0x0, + .modem_ofst = 0x2718, + .modem_size = 0x100c, + .apps_v4_flt_hash_ofst = 0x2718, + .apps_v4_flt_hash_size = 0x0, + .apps_v4_flt_nhash_ofst = 0x2718, + .apps_v4_flt_nhash_size = 0x0, + .apps_v6_flt_hash_ofst = 0x2718, + .apps_v6_flt_hash_size = 0x0, + .apps_v6_flt_nhash_ofst = 0x2718, + .apps_v6_flt_nhash_size = 0x0, + .apps_v4_rt_hash_ofst = 0x2718, + .apps_v4_rt_hash_size = 0x0, + .apps_v4_rt_nhash_ofst = 0x2718, + .apps_v4_rt_nhash_size = 0x0, + .apps_v6_rt_hash_ofst = 0x2718, + .apps_v6_rt_hash_size = 0x0, + .apps_v6_rt_nhash_ofst = 0x2718, + .apps_v6_rt_nhash_size = 0x0, + .uc_descriptor_ram_ofst = 0x3800, + .uc_descriptor_ram_size = 0x1000, + .end_ofst = 0x4800, +}; + + /** * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an * IPA_RM resource @@ -4873,7 +4967,7 @@ int ipa3_init_mem_partition(enum ipa_hw_type type) ipa3_ctx->ctrl->mem_partition = &ipa_4_2_mem_part; break; case IPA_HW_v4_5: - ipa3_ctx->ctrl->mem_partition = &ipa_4_1_mem_part; + ipa3_ctx->ctrl->mem_partition = &ipa_4_5_mem_part; break; case IPA_HW_None: case IPA_HW_v1_0: @@ -4891,19 +4985,6 @@ int ipa3_init_mem_partition(enum ipa_hw_type type) return -EPERM; } - if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) { - IPAERR("UC EVENT RING OFST 0x%x is unaligned\n", - IPA_MEM_PART(uc_event_ring_ofst)); - return -ENODEV; - } - - IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n", - IPA_MEM_PART(uc_event_ring_ofst), - IPA_MEM_PART(uc_event_ring_size)); - - IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), - IPA_MEM_PART(nat_size)); - if (IPA_MEM_PART(uc_info_ofst) & 3) { IPAERR("UC INFO OFST 0x%x is unaligned\n", IPA_MEM_PART(uc_info_ofst)); @@ -5074,18 +5155,112 @@ int 
ipa3_init_mem_partition(enum ipa_hw_type type) return -ENODEV; } + /* + * Routing rules points to hdr_proc_ctx in 32byte offsets from base. + * Base is modem hdr_proc_ctx first address. + * AP driver install APPS hdr_proc_ctx starting at the beginning of + * apps hdr_proc_ctx part. + * So first apps hdr_proc_ctx offset at some routing + * rule will be modem_hdr_proc_ctx_size >> 5 (32B). + */ + if (IPA_MEM_PART(modem_hdr_proc_ctx_size) & 31) { + IPAERR("MODEM HDR PROC CTX SIZE 0x%x is not 32B aligned\n", + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + return -ENODEV; + } + + /* + * AP driver when installing routing rule, it calcs the hdr_proc_ctx + * offset by local offset (from base of apps part) + + * modem_hdr_proc_ctx_size. This is to get offset from modem part base. + * Thus apps part must be adjacent to modem part + */ + if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) != + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size)) { + IPAERR("APPS HDR PROC CTX SIZE not adjacent to MODEM one!\n"); + return -ENODEV; + } + + IPADBG("NAT TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_tbl_ofst), + IPA_MEM_PART(nat_tbl_size)); + + if (IPA_MEM_PART(nat_tbl_ofst) & 31) { + IPAERR("NAT TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_tbl_ofst)); + return -ENODEV; + } + + IPADBG("NAT INDEX TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_index_tbl_ofst), + IPA_MEM_PART(nat_index_tbl_size)); + + if (IPA_MEM_PART(nat_index_tbl_ofst) & 3) { + IPAERR("NAT INDEX TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_index_tbl_ofst)); + return -ENODEV; + } + + IPADBG("NAT EXP TBL OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(nat_exp_tbl_ofst), + IPA_MEM_PART(nat_exp_tbl_size)); + + if (IPA_MEM_PART(nat_exp_tbl_ofst) & 31) { + IPAERR("NAT EXP TBL OFST 0x%x is unaligned\n", + IPA_MEM_PART(nat_exp_tbl_ofst)); + return -ENODEV; + } + IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(pdn_config_ofst), IPA_MEM_PART(pdn_config_size)); - if (IPA_MEM_PART(modem_ofst) & 7) { - 
IPAERR("MODEM OFST 0x%x is unaligned\n", - IPA_MEM_PART(modem_ofst)); + if (IPA_MEM_PART(pdn_config_ofst) & 7) { + IPAERR("PDN CONFIG OFST 0x%x is unaligned\n", + IPA_MEM_PART(pdn_config_ofst)); return -ENODEV; } - IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), - IPA_MEM_PART(modem_size)); + IPADBG("QUOTA STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_quota_ofst), + IPA_MEM_PART(stats_quota_size)); + + if (IPA_MEM_PART(stats_quota_ofst) & 7) { + IPAERR("QUOTA STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_quota_ofst)); + return -ENODEV; + } + + IPADBG("TETHERING STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_tethering_ofst), + IPA_MEM_PART(stats_tethering_size)); + + if (IPA_MEM_PART(stats_tethering_ofst) & 7) { + IPAERR("TETHERING STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_tethering_ofst)); + return -ENODEV; + } + + IPADBG("FILTER AND ROUTING STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_fnr_ofst), + IPA_MEM_PART(stats_fnr_size)); + + if (IPA_MEM_PART(stats_fnr_ofst) & 7) { + IPAERR("FILTER AND ROUTING STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_fnr_ofst)); + return -ENODEV; + } + + IPADBG("DROP STATS OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(stats_drop_ofst), + IPA_MEM_PART(stats_drop_size)); + + if (IPA_MEM_PART(stats_drop_ofst) & 7) { + IPAERR("DROP STATS OFST 0x%x is unaligned\n", + IPA_MEM_PART(stats_drop_ofst)); + return -ENODEV; + } IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(apps_v4_flt_hash_ofst), @@ -5122,6 +5297,25 @@ int ipa3_init_mem_partition(enum ipa_hw_type type) IPA_MEM_PART(apps_v6_rt_nhash_ofst), IPA_MEM_PART(apps_v6_rt_nhash_size)); + if (IPA_MEM_PART(modem_ofst) & 7) { + IPAERR("MODEM OFST 0x%x is unaligned\n", + IPA_MEM_PART(modem_ofst)); + return -ENODEV; + } + + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + if (IPA_MEM_PART(uc_descriptor_ram_ofst) & 1023) { + IPAERR("UC DESCRIPTOR RAM OFST 0x%x is unaligned\n", + 
IPA_MEM_PART(uc_descriptor_ram_ofst)); + return -ENODEV; + } + + IPADBG("UC DESCRIPTOR RAM OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(uc_descriptor_ram_ofst), + IPA_MEM_PART(uc_descriptor_ram_size)); + return 0; } @@ -6527,6 +6721,15 @@ void ipa3_suspend_apps_pipes(bool suspend) if (ep->valid) { IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx); + /* + * move the channel to callback mode. + * This needs to happen before starting the channel to make + * sure we don't loose any interrupt + */ + if (!suspend && !atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { if (suspend) { res = __ipa3_stop_gsi_channel(ipa_ep_idx); @@ -6546,9 +6749,6 @@ void ipa3_suspend_apps_pipes(bool suspend) } if (suspend) ipa3_gsi_poll_after_suspend(ep); - else if (!atomic_read(&ep->sys->curr_polling_state)) - gsi_config_channel_mode(ep->gsi_chan_hdl, - GSI_CHAN_MODE_CALLBACK); } ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); @@ -6561,6 +6761,14 @@ void ipa3_suspend_apps_pipes(bool suspend) if (ep->valid) { IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx); + /* + * move the channel to callback mode. 
+ * This needs to happen before starting the channel to make + * sure we don't loose any interrupt + */ + if (!suspend && !atomic_read(&ep->sys->curr_polling_state)) + gsi_config_channel_mode(ep->gsi_chan_hdl, + GSI_CHAN_MODE_CALLBACK); if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { if (suspend) { res = __ipa3_stop_gsi_channel(ipa_ep_idx); @@ -6580,9 +6788,6 @@ void ipa3_suspend_apps_pipes(bool suspend) } if (suspend) ipa3_gsi_poll_after_suspend(ep); - else if (!atomic_read(&ep->sys->curr_polling_state)) - gsi_config_channel_mode(ep->gsi_chan_hdl, - GSI_CHAN_MODE_CALLBACK); } } diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 8b362cc525944e9b67730bc84d6ac09928c39c95..8f13ac4ec45dab43bb431211ddaa3222c0e644bf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -89,7 +89,6 @@ static void rmnet_ipa_get_stats_and_update(void); static int ipa3_wwan_add_ul_flt_rule_to_ipa(void); static int ipa3_wwan_del_ul_flt_rule_to_ipa(void); static void ipa3_wwan_msg_free_cb(void*, u32, u32); -static void ipa3_rmnet_rx_cb(void *priv); static int ipa3_rmnet_poll(struct napi_struct *napi, int budget); static void ipa3_wake_tx_queue(struct work_struct *work); @@ -775,7 +774,7 @@ static int find_vchannel_name_index(const char *vchannel_name) { int i; - for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) { if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name, vchannel_name) == 0) return i; @@ -1278,13 +1277,9 @@ static void apps_ipa_packet_receive_notify(void *priv, } dev->stats.rx_packets++; dev->stats.rx_bytes += packet_len; - } else if (evt == IPA_CLIENT_START_POLL) - ipa3_rmnet_rx_cb(priv); - else if (evt == IPA_CLIENT_COMP_NAPI) { - if (ipa3_rmnet_res.ipa_napi_enable) - napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi)); - } else + } else { IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt); + } } static int 
handle3_ingress_format(struct net_device *dev, @@ -1341,7 +1336,8 @@ static int handle3_ingress_format(struct net_device *dev, ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify; ipa_wan_ep_cfg->priv = dev; - ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable; + if (ipa3_rmnet_res.ipa_napi_enable) + ipa_wan_ep_cfg->napi_obj = &(rmnet_ipa3_ctx->wwan_priv->napi); ipa_wan_ep_cfg->desc_fifo_sz = ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE; @@ -3638,6 +3634,13 @@ void ipa3_q6_handshake_complete(bool ssr_bootup) * SSR recovery */ rmnet_ipa_get_network_stats_and_update(); + } else { + /* + * To enable ipa power collapse we need to enable rpmh and uc + * handshake So that uc can do register retention. To enable + * this handshake we need to send the below message to rpmh + */ + ipa_pc_qmp_enable(); } imp_handle_modem_ready(); @@ -4184,12 +4187,6 @@ static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type) kfree(buff); } -static void ipa3_rmnet_rx_cb(void *priv) -{ - IPAWANDBG_LOW("\n"); - napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi)); -} - static int ipa3_rmnet_poll(struct napi_struct *napi, int budget) { int rcvd_pkts = 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c index 929242a5e26863e356f5023446c257f028deac68..2a406f5e7a34284a325d882cb904ad8dc55863d6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -82,8 +82,17 @@ static long ipa3_wan_ioctl(struct file *filp, DRIVER_NAME); if (!ipa3_process_ioctl) { - IPAWANDBG("modem is in SSR, ignoring ioctl\n"); - return -EAGAIN; + + if ((cmd == WAN_IOC_SET_LAN_CLIENT_INFO) || + (cmd == WAN_IOC_CLEAR_LAN_CLIENT_INFO)) { + IPAWANDBG("Modem is in SSR\n"); + IPAWANDBG("Still allow IOCTL for exceptions (%d)\n", + cmd); + } else { + IPAWANERR("Modem is in SSR, ignoring ioctl (%d)\n", + cmd); + return -EAGAIN; + } } switch (cmd) { diff --git 
a/drivers/platform/msm/mhi_dev/Makefile b/drivers/platform/msm/mhi_dev/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..53ef716c51498f57a92bb68f0247f5275a67e9fc --- /dev/null +++ b/drivers/platform/msm/mhi_dev/Makefile @@ -0,0 +1,7 @@ +# Makefile for MHI driver +obj-y += mhi_mmio.o +obj-y += mhi.o +obj-y += mhi_ring.o +obj-y += mhi_uci.o +obj-y += mhi_sm.o +obj-y += mhi_dev_net.o diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c new file mode 100644 index 0000000000000000000000000000000000000000..351ad0d1992b375f623832d01378c6c656b54140 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi.c @@ -0,0 +1,2997 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" +#include "mhi_hwio.h" +#include "mhi_sm.h" + +/* Wait time on the device for Host to set M0 state */ +#define MHI_DEV_M0_MAX_CNT 30 +/* Wait time before suspend/resume is complete */ +#define MHI_SUSPEND_MIN 100 +#define MHI_SUSPEND_TIMEOUT 600 +#define MHI_WAKEUP_TIMEOUT_CNT 20 +#define MHI_MASK_CH_EV_LEN 32 +#define MHI_RING_CMD_ID 0 +#define MHI_RING_PRIMARY_EVT_ID 1 +#define MHI_1K_SIZE 0x1000 +/* Updated Specification for event start is NER - 2 and end - NER -1 */ +#define MHI_HW_ACC_EVT_RING_START 2 +#define MHI_HW_ACC_EVT_RING_END 1 + +#define MHI_HOST_REGION_NUM 2 + +#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1 +#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2 + +#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32)) +#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF) +#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF) + +#define MHI_IPC_LOG_PAGES (100) +#define MHI_REGLEN 0x100 +#define MHI_INIT 0 +#define MHI_REINIT 1 + +#define TR_RING_ELEMENT_SZ sizeof(struct mhi_dev_transfer_ring_element) +#define RING_ELEMENT_TYPE_SZ sizeof(union mhi_dev_ring_element_type) + +enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR; +enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE; +void *mhi_ipc_log; + +static struct mhi_dev *mhi_ctx; +static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event, + unsigned long data); +static void mhi_ring_init_cb(void *user_data); +static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info); +static int mhi_deinit(struct mhi_dev *mhi); +static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify); +static int mhi_dev_pcie_notify_event; +static void mhi_dev_transfer_completion_cb(void *mreq); +static struct mhi_dev_uevent_info channel_state_info[MHI_MAX_CHANNELS]; + +/* + * 
mhi_dev_ring_cache_completion_cb () - Call back function called + * by IPA driver when ring element cache is done + * + * @req : ring cache request + */ +static void mhi_dev_ring_cache_completion_cb(void *req) +{ + struct ring_cache_req *ring_req = NULL; + + ring_req = (struct ring_cache_req *)req; + + complete(ring_req->done); +} + +void mhi_dev_read_from_host(struct mhi_dev *mhi, struct mhi_addr *transfer) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0; + struct ring_cache_req ring_req; + + DECLARE_COMPLETION(done); + + ring_req.done = &done; + + if (WARN_ON(!mhi)) + return; + + if (mhi->config_iatu) { + offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa; + /* Mapping the translated physical address on the device */ + host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset; + } else { + host_addr_pa = transfer->host_pa | bit_40; + } + + mhi_log(MHI_MSG_VERBOSE, + "device 0x%x <<-- host 0x%llx, size %d\n", + transfer->phy_addr, host_addr_pa, + (int) transfer->size); + rc = ipa_dma_async_memcpy((u64)transfer->phy_addr, host_addr_pa, + (int)transfer->size, + mhi_dev_ring_cache_completion_cb, &ring_req); + if (rc) + pr_err("error while reading from host:%d\n", rc); + + wait_for_completion(&done); +} +EXPORT_SYMBOL(mhi_dev_read_from_host); + +void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *transfer, + struct event_req *ereq, enum mhi_dev_transfer_type tr_type) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0; + dma_addr_t dma; + + if (WARN_ON(!mhi)) + return; + + if (mhi->config_iatu) { + offset = (uint64_t) transfer->host_pa - mhi->ctrl_base.host_pa; + /* Mapping the translated physical address on the device */ + host_addr_pa = (uint64_t) mhi->ctrl_base.device_pa + offset; + } else { + host_addr_pa = transfer->host_pa | bit_40; + } + + mhi_log(MHI_MSG_VERBOSE, + "device 0x%llx --> host 0x%llx, size %d\n", + (uint64_t) mhi->cache_dma_handle, host_addr_pa, + (int) 
transfer->size); + if (tr_type == MHI_DEV_DMA_ASYNC) { + dma = dma_map_single(&mhi->pdev->dev, + transfer->virt_addr, transfer->size, + DMA_TO_DEVICE); + if (ereq->event_type == SEND_EVENT_BUFFER) { + ereq->dma = dma; + ereq->dma_len = transfer->size; + } else if (ereq->event_type == SEND_EVENT_RD_OFFSET) { + ereq->event_rd_dma = dma; + } + rc = ipa_dma_async_memcpy(host_addr_pa, (uint64_t) dma, + (int)transfer->size, + ereq->client_cb, ereq); + if (rc) + pr_err("error while writing to host:%d\n", rc); + } else if (tr_type == MHI_DEV_DMA_SYNC) { + /* Copy the device content to a local device + * physical address. + */ + memcpy(mhi->dma_cache, transfer->virt_addr, + transfer->size); + rc = ipa_dma_sync_memcpy(host_addr_pa, + (u64) mhi->cache_dma_handle, + (int) transfer->size); + if (rc) + pr_err("error while writing to host:%d\n", rc); + } +} +EXPORT_SYMBOL(mhi_dev_write_to_host); + +int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len, + struct mhi_dev *mhi, struct mhi_req *mreq) +{ + int rc = 0; + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0; + struct mhi_dev_ring *ring = NULL; + + + if (WARN_ON(!mhi || !dev || !host_pa || !mreq)) + return -EINVAL; + + if (mhi->config_iatu) { + offset = (uint64_t)host_pa - mhi->data_base.host_pa; + /* Mapping the translated physical address on the device */ + host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset; + } else { + host_addr_pa = host_pa | bit_40; + } + + mhi_log(MHI_MSG_VERBOSE, "device 0x%llx <-- host 0x%llx, size %d\n", + (uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len); + + if (mreq->mode == IPA_DMA_SYNC) { + rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle, + host_addr_pa, (int) len); + if (rc) { + pr_err("error while reading chan using sync:%d\n", rc); + return rc; + } + memcpy(dev, mhi->read_handle, len); + } else if (mreq->mode == IPA_DMA_ASYNC) { + ring = mreq->client->channel->ring; + mreq->dma = dma_map_single(&mhi->pdev->dev, dev, len, + 
DMA_FROM_DEVICE); + mhi_dev_ring_inc_index(ring, ring->rd_offset); + + if (ring->rd_offset == ring->wr_offset) + mreq->snd_cmpl = 1; + else + mreq->snd_cmpl = 0; + rc = ipa_dma_async_memcpy(mreq->dma, host_addr_pa, + (int) len, mhi_dev_transfer_completion_cb, + mreq); + if (rc) { + pr_err("error while reading chan using async:%d\n", rc); + return rc; + } + } + return rc; +} +EXPORT_SYMBOL(mhi_transfer_host_to_device); + +int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len, + struct mhi_dev *mhi, struct mhi_req *req) +{ + uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0, offset = 0; + struct mhi_dev_ring *ring = NULL; + + if (WARN_ON(!mhi || !dev || !req || !host_addr)) + return -EINVAL; + + if (mhi->config_iatu) { + offset = (uint64_t)host_addr - mhi->data_base.host_pa; + /* Mapping the translated physical address on the device */ + host_addr_pa = (uint64_t) mhi->data_base.device_pa + offset; + } else { + host_addr_pa = host_addr | bit_40; + } + mhi_log(MHI_MSG_VERBOSE, "device 0x%llx ---> host 0x%llx, size %d\n", + (uint64_t) mhi->write_dma_handle, + host_addr_pa, (int) len); + + if (req->mode == IPA_DMA_SYNC) { + memcpy(mhi->write_handle, dev, len); + return ipa_dma_sync_memcpy(host_addr_pa, + (u64) mhi->write_dma_handle, (int) len); + } else if (req->mode == IPA_DMA_ASYNC) { + req->dma = dma_map_single(&mhi->pdev->dev, req->buf, + req->len, DMA_TO_DEVICE); + ring = req->client->channel->ring; + mhi_dev_ring_inc_index(ring, ring->rd_offset); + if (ring->rd_offset == ring->wr_offset) + req->snd_cmpl = 1; + return ipa_dma_async_memcpy(host_addr_pa, + (uint64_t) req->dma, (int) len, + mhi_dev_transfer_completion_cb, req); + } + return 0; +} +EXPORT_SYMBOL(mhi_transfer_device_to_host); + +int mhi_dev_is_list_empty(void) +{ + if (list_empty(&mhi_ctx->event_ring_list) && + list_empty(&mhi_ctx->process_ring_list)) + return 0; + + return 1; +} +EXPORT_SYMBOL(mhi_dev_is_list_empty); + +static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi, + 
struct ep_pcie_db_config *erdb_cfg) +{ + if (mhi->cfg.event_rings == NUM_CHANNELS) { + erdb_cfg->base = HW_CHANNEL_BASE; + erdb_cfg->end = HW_CHANNEL_END; + } else { + erdb_cfg->base = mhi->cfg.event_rings - + MHI_HW_ACC_EVT_RING_START; + erdb_cfg->end = mhi->cfg.event_rings - + MHI_HW_ACC_EVT_RING_END; + } +} + +int mhi_pcie_config_db_routing(struct mhi_dev *mhi) +{ + struct ep_pcie_db_config chdb_cfg, erdb_cfg; + + if (WARN_ON(!mhi)) + return -EINVAL; + + /* Configure Doorbell routing */ + chdb_cfg.base = HW_CHANNEL_BASE; + chdb_cfg.end = HW_CHANNEL_END; + chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb; + + mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg); + + mhi_log(MHI_MSG_VERBOSE, + "Event rings 0x%x => er_base 0x%x, er_end %d\n", + mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end); + erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb; + ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg); + + return 0; +} +EXPORT_SYMBOL(mhi_pcie_config_db_routing); + +static int mhi_hwc_init(struct mhi_dev *mhi) +{ + int rc = 0; + struct ep_pcie_msi_config cfg; + struct ipa_mhi_init_params ipa_init_params; + struct ep_pcie_db_config erdb_cfg; + + /* Call IPA HW_ACC Init with MSI Address and db routing info */ + rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg); + if (rc) { + pr_err("Error retrieving pcie msi logic\n"); + return rc; + } + + rc = mhi_pcie_config_db_routing(mhi); + if (rc) { + pr_err("Error configuring DB routing\n"); + return rc; + } + + mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg); + mhi_log(MHI_MSG_VERBOSE, + "Event rings 0x%x => er_base 0x%x, er_end %d\n", + mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end); + + erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb; + memset(&ipa_init_params, 0, sizeof(ipa_init_params)); + ipa_init_params.msi.addr_hi = cfg.upper; + ipa_init_params.msi.addr_low = cfg.lower; + ipa_init_params.msi.data = cfg.data; + ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1); + ipa_init_params.first_er_idx = erdb_cfg.base; + 
ipa_init_params.first_ch_idx = HW_CHANNEL_BASE; + + if (mhi_ctx->config_iatu) + ipa_init_params.mmio_addr = + ((uint32_t) mhi_ctx->mmio_base_pa_addr) + MHI_REGLEN; + else + ipa_init_params.mmio_addr = + ((uint32_t) mhi_ctx->mmio_base_pa_addr); + + if (!mhi_ctx->config_iatu) + ipa_init_params.assert_bit40 = true; + + mhi_log(MHI_MSG_VERBOSE, + "MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n", + ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data); + ipa_init_params.notify = mhi_hwc_cb; + ipa_init_params.priv = mhi; + + return ipa_mhi_init(&ipa_init_params); +} + +static int mhi_hwc_start(struct mhi_dev *mhi) +{ + struct ipa_mhi_start_params ipa_start_params; + + memset(&ipa_start_params, 0, sizeof(ipa_start_params)); + + if (mhi->config_iatu) { + ipa_start_params.host_ctrl_addr = mhi->ctrl_base.device_pa; + ipa_start_params.host_data_addr = mhi->data_base.device_pa; + } else { + ipa_start_params.channel_context_array_addr = + mhi->ch_ctx_shadow.host_pa; + ipa_start_params.event_context_array_addr = + mhi->ev_ctx_shadow.host_pa; + } + + return ipa_mhi_start(&ipa_start_params); +} + +static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event, + unsigned long data) +{ + int rc = 0; + + switch (event) { + case IPA_MHI_EVENT_READY: + mhi_log(MHI_MSG_INFO, + "HW Channel uC is ready event=0x%X\n", event); + rc = mhi_hwc_start(mhi_ctx); + if (rc) { + pr_err("hwc_init start failed with %d\n", rc); + return; + } + + rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx); + if (rc) { + pr_err("Failed to enable channel db\n"); + return; + } + + rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx); + if (rc) { + pr_err("Failed to enable control interrupt\n"); + return; + } + + rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx); + if (rc) { + pr_err("Failed to enable command db\n"); + return; + } + + mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED); + + ep_pcie_mask_irq_event(mhi_ctx->phandle, + EP_PCIE_INT_EVT_MHI_A7, true); + break; + case 
IPA_MHI_EVENT_DATA_AVAILABLE: + rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP); + if (rc) { + pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc); + return; + } + break; + default: + pr_err("HW Channel uC unknown event 0x%X\n", event); + break; + } +} + +static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid, + enum mhi_dev_ring_element_type_id type) +{ + int rc = -EINVAL; + struct ipa_mhi_connect_params connect_params; + + memset(&connect_params, 0, sizeof(connect_params)); + + switch (type) { + case MHI_DEV_RING_EL_RESET: + case MHI_DEV_RING_EL_STOP: + rc = ipa_mhi_disconnect_pipe( + mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]); + if (rc) + pr_err("Stopping HW Channel%d failed 0x%X\n", + chid, rc); + break; + case MHI_DEV_RING_EL_START: + connect_params.channel_id = chid; + connect_params.sys.skip_ep_cfg = true; + if ((chid % 2) == 0x0) + connect_params.sys.client = IPA_CLIENT_MHI_PROD; + else + connect_params.sys.client = IPA_CLIENT_MHI_CONS; + + rc = ipa_mhi_connect_pipe(&connect_params, + &mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]); + if (rc) + pr_err("HW Channel%d start failed 0x%X\n", + chid, rc); + break; + case MHI_DEV_RING_EL_INVALID: + default: + pr_err("Invalid Ring Element type = 0x%X\n", type); + break; + } + + return rc; +} + +static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev, + uint32_t *int_value) +{ + int rc = 0; + + rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value); + if (rc) { + pr_err("Failed to read A7 status\n"); + return; + } + + rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value); + if (rc) { + pr_err("Failed to clear A7 status\n"); + return; + } +} + +static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id) +{ + struct mhi_addr data_transfer; + + if (mhi->use_ipa) { + data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + data_transfer.phy_addr = mhi->ch_ctx_cache_dma_handle + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } + + 
data_transfer.size = sizeof(struct mhi_dev_ch_ctx); + /* Fetch the channel ctx (*dst, *src, size) */ + mhi_dev_read_from_host(mhi, &data_transfer); +} + +int mhi_dev_syserr(struct mhi_dev *mhi) +{ + if (WARN_ON(!mhi)) + return -EINVAL; + + pr_err("MHI dev sys error\n"); + + return mhi_dev_dump_mmio(mhi); +} +EXPORT_SYMBOL(mhi_dev_syserr); + +int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring, + union mhi_dev_ring_element_type *el) +{ + int rc = 0; + uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring; + struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx]; + union mhi_dev_ring_ctx *ctx; + struct ep_pcie_msi_config cfg; + struct mhi_addr transfer_addr; + + rc = ep_pcie_get_msi_config(mhi->phandle, &cfg); + if (rc) { + pr_err("Error retrieving pcie msi logic\n"); + return rc; + } + + if (evnt_ring_idx > mhi->cfg.event_rings) { + pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx); + return -EINVAL; + } + + ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring]; + if (mhi_ring_get_state(ring) == RING_STATE_UINT) { + rc = mhi_ring_start(ring, ctx, mhi); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error starting event ring %d\n", evnt_ring); + return rc; + } + } + + mutex_lock(&mhi->mhi_event_lock); + /* add the ring element */ + mhi_dev_add_element(ring, el, NULL, 0); + + ring->ring_ctx_shadow->ev.rp = (ring->rd_offset * + sizeof(union mhi_dev_ring_element_type)) + + ring->ring_ctx->generic.rbase; + + mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n", + ring->ring_ctx_shadow->ev.rp, evnt_ring_idx); + + if (mhi->use_ipa) + transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ev_ctx) * + evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp - + (uint32_t) ring->ring_ctx; + else + transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va + + sizeof(struct mhi_dev_ev_ctx) * + evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp - + (uint32_t) ring->ring_ctx; + + transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp; + transfer_addr.size = 
sizeof(uint64_t); + + mhi_dev_write_to_host(mhi, &transfer_addr, NULL, MHI_DEV_DMA_SYNC); + /* + * rp update in host memory should be flushed + * before sending a MSI to the host + */ + wmb(); + + mutex_unlock(&mhi->mhi_event_lock); + mhi_log(MHI_MSG_VERBOSE, "event sent:\n"); + mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr); + mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x\n", el->evt_tr_comp.len); + mhi_log(MHI_MSG_VERBOSE, "evnt code :0x%x\n", el->evt_tr_comp.code); + mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n", el->evt_tr_comp.type); + mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n", el->evt_tr_comp.chid); + + return ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec); +} + +/* + * mhi_dev_event_buf_completion_cb() -Cb function called by IPA driver + * when transfer completion event buffer copy is done. + * + * @req - event_req structure + */ + +static void mhi_dev_event_buf_completion_cb(void *req) +{ + struct event_req *ereq = NULL; + + ereq = (struct event_req *)req; + + dma_unmap_single(&mhi_ctx->pdev->dev, ereq->dma, + ereq->dma_len, DMA_TO_DEVICE); +} + +/** + * mhi_dev_event_rd_offset_completion_cb() -CB function called by IPA driver + * when event rd_offset transfer is done. 
+ * + * @req - event_req structure + */ + +static void mhi_dev_event_rd_offset_completion_cb(void *req) +{ + union mhi_dev_ring_ctx *ctx; + int rc = 0; + struct event_req *ereq = (struct event_req *)req; + struct mhi_dev_channel *ch = ereq->context; + struct mhi_dev *mhi = ch->ring->mhi_dev; + unsigned long flags; + + dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma, + sizeof(uint64_t), DMA_TO_DEVICE); + ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring]; + rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec); + if (rc) + pr_err("%s: error sending in msi\n", __func__); + + /* return the event req to pre allocated pooled list */ + spin_lock_irqsave(&mhi->lock, flags); + list_add_tail(&ereq->list, &ch->event_req_buffers); + spin_unlock_irqrestore(&mhi->lock, flags); +} + +static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring, + struct event_req *ereq, uint32_t evt_len) +{ + int rc = 0; + uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring; + struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx]; + union mhi_dev_ring_ctx *ctx; + struct mhi_addr transfer_addr; + + rc = ep_pcie_get_msi_config(mhi->phandle, &mhi->msi_cfg); + if (rc) { + pr_err("Error retrieving pcie msi logic\n"); + return rc; + } + + if (evnt_ring_idx > mhi->cfg.event_rings) { + pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx); + return -EINVAL; + } + + ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring]; + if (mhi_ring_get_state(ring) == RING_STATE_UINT) { + rc = mhi_ring_start(ring, ctx, mhi); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error starting event ring %d\n", evnt_ring); + return rc; + } + } + + /* add the ring element */ + ereq->client_cb = mhi_dev_event_buf_completion_cb; + ereq->event_type = SEND_EVENT_BUFFER; + rc = mhi_dev_add_element(ring, ereq->tr_events, ereq, evt_len); + if (rc) { + pr_err("%s(): error in adding element rc %d\n", __func__, rc); + return rc; + } + ring->ring_ctx_shadow->ev.rp = (ring->rd_offset 
* + sizeof(union mhi_dev_ring_element_type)) + + ring->ring_ctx->generic.rbase; + + mhi_log(MHI_MSG_VERBOSE, "ev.rp = %llx for %lld\n", + ring->ring_ctx_shadow->ev.rp, evnt_ring_idx); + + if (mhi->use_ipa) + transfer_addr.host_pa = (mhi->ev_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ev_ctx) * + evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp - + (uint32_t)ring->ring_ctx; + else + transfer_addr.device_va = (mhi->ev_ctx_shadow.device_va + + sizeof(struct mhi_dev_ev_ctx) * + evnt_ring) + (uint32_t)&ring->ring_ctx->ev.rp - + (uint32_t)ring->ring_ctx; + + transfer_addr.virt_addr = &ring->ring_ctx_shadow->ev.rp; + transfer_addr.size = sizeof(uint64_t); + ereq->event_type = SEND_EVENT_RD_OFFSET; + ereq->client_cb = mhi_dev_event_rd_offset_completion_cb; + ereq->event_ring = evnt_ring; + mhi_dev_write_to_host(mhi, &transfer_addr, ereq, MHI_DEV_DMA_ASYNC); + return 0; +} + +static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch, + uint32_t rd_ofst, uint32_t len, + enum mhi_dev_cmd_completion_code code) +{ + union mhi_dev_ring_element_type compl_event; + struct mhi_dev *mhi = ch->ring->mhi_dev; + + compl_event.evt_tr_comp.chid = ch->ch_id; + compl_event.evt_tr_comp.type = + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT; + compl_event.evt_tr_comp.len = len; + compl_event.evt_tr_comp.code = code; + compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase + + rd_ofst * sizeof(struct mhi_dev_transfer_ring_element); + + return mhi_dev_send_event(mhi, + mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event); +} + +int mhi_dev_send_state_change_event(struct mhi_dev *mhi, + enum mhi_dev_state state) +{ + union mhi_dev_ring_element_type event; + + event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG; + event.evt_state_change.mhistate = state; + + return mhi_dev_send_event(mhi, 0, &event); +} +EXPORT_SYMBOL(mhi_dev_send_state_change_event); + +int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env) +{ + union mhi_dev_ring_element_type event; 
+ + event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY; + event.evt_ee_state.execenv = exec_env; + + return mhi_dev_send_event(mhi, 0, &event); +} +EXPORT_SYMBOL(mhi_dev_send_ee_event); + +static void mhi_dev_trigger_cb(enum mhi_client_channel ch_id) +{ + struct mhi_dev_ready_cb_info *info; + enum mhi_ctrl_info state_data; + + list_for_each_entry(info, &mhi_ctx->client_cb_list, list) + if (info->cb && info->cb_data.channel == ch_id) { + mhi_ctrl_state_info(info->cb_data.channel, &state_data); + info->cb_data.ctrl_info = state_data; + info->cb(&info->cb_data); + } +} + +int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi) +{ + /* + * Expected usage is when there is HW ACC traffic IPA uC notifes + * Q6 -> IPA A7 -> MHI core -> MHI SM + */ + return mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP); +} +EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup); + +static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi, + enum mhi_dev_cmd_completion_code code) +{ + union mhi_dev_ring_element_type event; + + if (code > MHI_CMD_COMPL_CODE_RES) { + mhi_log(MHI_MSG_ERROR, + "Invalid cmd compl code: %d\n", code); + return -EINVAL; + } + + /* send the command completion event to the host */ + event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase + + (mhi->ring[MHI_RING_CMD_ID].rd_offset * + (sizeof(union mhi_dev_ring_element_type))); + mhi_log(MHI_MSG_VERBOSE, "evt cmd comp ptr :%d\n", + (uint32_t) event.evt_cmd_comp.ptr); + event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT; + event.evt_cmd_comp.code = code; + return mhi_dev_send_event(mhi, 0, &event); +} + +static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id, + struct mhi_dev *mhi) +{ + struct mhi_addr data_transfer; + + if (ring->rd_offset != ring->wr_offset && + mhi->ch_ctx_cache[ch_id].ch_type == + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) { + mhi_log(MHI_MSG_INFO, "Pending outbound transaction\n"); + return 0; + } else if (mhi->ch_ctx_cache[ch_id].ch_type == + 
MHI_DEV_CH_TYPE_INBOUND_CHANNEL && + mhi->ch[ch_id].wr_request_active) { + mhi_log(MHI_MSG_INFO, "Pending inbound transaction\n"); + return 0; + } + + /* set the channel to stop */ + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP; + mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED; + + if (mhi->use_ipa) { + data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } else { + data_transfer.device_va = mhi->ch_ctx_shadow.device_va + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } + data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state); + data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state; + + /* update the channel state in the host */ + mhi_dev_write_to_host(mhi, &data_transfer, NULL, MHI_DEV_DMA_SYNC); + + /* send the completion event to the host */ + return mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_SUCCESS); +} + +static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi, + union mhi_dev_ring_element_type *el, void *ctx) +{ + int rc = 0; + uint32_t ch_id = 0; + union mhi_dev_ring_element_type event; + struct mhi_addr host_addr; + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + char *connected[2] = { "MHI_CHANNEL_STATE_12=CONNECTED", NULL}; + char *disconnected[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL}; + + ch_id = el->generic.chid; + mhi_log(MHI_MSG_VERBOSE, "for channel:%d and cmd:%d\n", + ch_id, el->generic.type); + + switch (el->generic.type) { + case MHI_DEV_RING_EL_START: + mhi_log(MHI_MSG_VERBOSE, "recived start cmd for channel %d\n", + ch_id); + if (ch_id >= (HW_CHANNEL_BASE)) { + rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error with HW channel cmd %d\n", rc); + rc = mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_UNDEFINED); + if (rc) + mhi_log(MHI_MSG_ERROR, + "Error with compl event\n"); + return; + } + goto 
send_start_completion_event; + } + + /* fetch the channel context from host */ + mhi_dev_fetch_ch_ctx(mhi, ch_id); + + /* Initialize and configure the corresponding channel ring */ + rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id], + (union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id], + mhi); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "start ring failed for ch %d\n", ch_id); + rc = mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_UNDEFINED); + if (rc) + mhi_log(MHI_MSG_ERROR, + "Error with compl event\n"); + return; + } + + mhi->ring[mhi->ch_ring_start + ch_id].state = + RING_STATE_PENDING; + + /* set the channel to running */ + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING; + mhi->ch[ch_id].state = MHI_DEV_CH_STARTED; + mhi->ch[ch_id].ch_id = ch_id; + mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id]; + mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type; + + /* enable DB for event ring */ + rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id); + if (rc) { + pr_err("Failed to enable channel db\n"); + rc = mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_UNDEFINED); + if (rc) + mhi_log(MHI_MSG_ERROR, + "Error with compl event\n"); + return; + } + + if (mhi->use_ipa) + host_addr.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + else + host_addr.device_va = mhi->ch_ctx_shadow.device_va + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + + host_addr.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state; + host_addr.size = sizeof(enum mhi_dev_ch_ctx_state); + + mhi_dev_write_to_host(mhi, &host_addr, NULL, MHI_DEV_DMA_SYNC); + +send_start_completion_event: + rc = mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_SUCCESS); + if (rc) + pr_err("Error sending command completion event\n"); + + mhi_update_state_info(ch_id, MHI_STATE_CONNECTED); + /* Trigger callback to clients */ + mhi_dev_trigger_cb(ch_id); + if (ch_id == MHI_CLIENT_MBIM_OUT) + kobject_uevent_env(&mhi_ctx->dev->kobj, + KOBJ_CHANGE, connected); + 
break; + case MHI_DEV_RING_EL_STOP: + if (ch_id >= HW_CHANNEL_BASE) { + rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type); + if (rc) + mhi_log(MHI_MSG_ERROR, + "send channel stop cmd event failed\n"); + + /* send the completion event to the host */ + event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase + + (mhi->ring[MHI_RING_CMD_ID].rd_offset * + (sizeof(union mhi_dev_ring_element_type))); + event.evt_cmd_comp.type = + MHI_DEV_RING_EL_CMD_COMPLETION_EVT; + if (rc == 0) + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_SUCCESS; + else + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_UNDEFINED; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) { + pr_err("stop event send failed\n"); + return; + } + } else { + /* + * Check if there are any pending transactions for the + * ring associated with the channel. If no, proceed to + * write disable the channel state else send stop + * channel command to check if one can suspend the + * command. + */ + ring = &mhi->ring[ch_id + mhi->ch_ring_start]; + if (ring->state == RING_STATE_UINT) { + pr_err("Channel not opened for %d\n", ch_id); + return; + } + + ch = &mhi->ch[ch_id]; + + mutex_lock(&ch->ch_lock); + + mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP; + rc = mhi_dev_process_stop_cmd( + &mhi->ring[mhi->ch_ring_start + ch_id], + ch_id, mhi); + if (rc) + pr_err("stop event send failed\n"); + + mutex_unlock(&ch->ch_lock); + mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED); + if (ch_id == MHI_CLIENT_MBIM_OUT) + kobject_uevent_env(&mhi_ctx->dev->kobj, + KOBJ_CHANGE, disconnected); + } + break; + case MHI_DEV_RING_EL_RESET: + mhi_log(MHI_MSG_VERBOSE, + "received reset cmd for channel %d\n", ch_id); + if (ch_id >= HW_CHANNEL_BASE) { + rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type); + if (rc) + mhi_log(MHI_MSG_ERROR, + "send channel stop cmd event failed\n"); + + /* send the completion event to the host */ + event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase + + (mhi->ring[MHI_RING_CMD_ID].rd_offset * + (sizeof(union 
mhi_dev_ring_element_type))); + event.evt_cmd_comp.type = + MHI_DEV_RING_EL_CMD_COMPLETION_EVT; + if (rc == 0) + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_SUCCESS; + else + event.evt_cmd_comp.code = + MHI_CMD_COMPL_CODE_UNDEFINED; + + rc = mhi_dev_send_event(mhi, 0, &event); + if (rc) { + pr_err("stop event send failed\n"); + return; + } + } else { + + mhi_log(MHI_MSG_VERBOSE, + "received reset cmd for channel %d\n", + ch_id); + + ring = &mhi->ring[ch_id + mhi->ch_ring_start]; + if (ring->state == RING_STATE_UINT) { + pr_err("Channel not opened for %d\n", ch_id); + return; + } + + ch = &mhi->ch[ch_id]; + + mutex_lock(&ch->ch_lock); + + /* hard stop and set the channel to stop */ + mhi->ch_ctx_cache[ch_id].ch_state = + MHI_DEV_CH_STATE_DISABLED; + mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED; + if (mhi->use_ipa) + host_addr.host_pa = + mhi->ch_ctx_shadow.host_pa + + (sizeof(struct mhi_dev_ch_ctx) * ch_id); + else + host_addr.device_va = + mhi->ch_ctx_shadow.device_va + + (sizeof(struct mhi_dev_ch_ctx) * ch_id); + + host_addr.virt_addr = + &mhi->ch_ctx_cache[ch_id].ch_state; + host_addr.size = sizeof(enum mhi_dev_ch_ctx_state); + + /* update the channel state in the host */ + mhi_dev_write_to_host(mhi, &host_addr, NULL, + MHI_DEV_DMA_SYNC); + + /* send the completion event to the host */ + rc = mhi_dev_send_cmd_comp_event(mhi, + MHI_CMD_COMPL_CODE_SUCCESS); + if (rc) + pr_err("Error sending command completion event\n"); + mutex_unlock(&ch->ch_lock); + mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED); + if (ch_id == MHI_CLIENT_MBIM_OUT) + kobject_uevent_env(&mhi_ctx->dev->kobj, + KOBJ_CHANGE, disconnected); + } + break; + default: + pr_err("%s: Invalid command:%d\n", __func__, el->generic.type); + break; + } +} + +static void mhi_dev_process_tre_ring(struct mhi_dev *mhi, + union mhi_dev_ring_element_type *el, void *ctx) +{ + struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx; + struct mhi_dev_channel *ch; + struct mhi_dev_client_cb_reason reason; + + if 
(ring->id < mhi->ch_ring_start) { + mhi_log(MHI_MSG_VERBOSE, + "invalid channel ring id (%d), should be < %d\n", + ring->id, mhi->ch_ring_start); + return; + } + + ch = &mhi->ch[ring->id - mhi->ch_ring_start]; + reason.ch_id = ch->ch_id; + reason.reason = MHI_DEV_TRE_AVAILABLE; + + /* Invoke a callback to let the client know its data is ready. + * Copy this event to the clients context so that it can be + * sent out once the client has fetch the data. Update the rp + * before sending the data as part of the event completion + */ + if (ch->active_client && ch->active_client->event_trigger != NULL) + ch->active_client->event_trigger(&reason); +} + +static void mhi_dev_process_ring_pending(struct work_struct *work) +{ + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, pending_work); + struct list_head *cp, *q; + struct mhi_dev_ring *ring; + struct mhi_dev_channel *ch; + int rc = 0; + + mutex_lock(&mhi_ctx->mhi_lock); + rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]); + if (rc) { + mhi_log(MHI_MSG_ERROR, "error processing command ring\n"); + goto exit; + } + + list_for_each_safe(cp, q, &mhi->process_ring_list) { + ring = list_entry(cp, struct mhi_dev_ring, list); + list_del(cp); + mhi_log(MHI_MSG_VERBOSE, "processing ring %d\n", ring->id); + rc = mhi_dev_process_ring(ring); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error processing ring %d\n", ring->id); + goto exit; + } + + if (ring->id < mhi->ch_ring_start) { + mhi_log(MHI_MSG_ERROR, + "ring (%d) is not a channel ring\n", ring->id); + goto exit; + } + + ch = &mhi->ch[ring->id - mhi->ch_ring_start]; + rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "error enabling chdb interrupt for %d\n", ch->ch_id); + goto exit; + } + } + +exit: + mutex_unlock(&mhi_ctx->mhi_lock); +} + +static int mhi_dev_get_event_notify(enum mhi_dev_state state, + enum mhi_dev_event *event) +{ + int rc = 0; + + switch (state) { + case MHI_DEV_M0_STATE: + *event = MHI_DEV_EVENT_M0_STATE; + 
break; + case MHI_DEV_M1_STATE: + *event = MHI_DEV_EVENT_M1_STATE; + break; + case MHI_DEV_M2_STATE: + *event = MHI_DEV_EVENT_M2_STATE; + break; + case MHI_DEV_M3_STATE: + *event = MHI_DEV_EVENT_M3_STATE; + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static void mhi_dev_queue_channel_db(struct mhi_dev *mhi, + uint32_t chintr_value, uint32_t ch_num) +{ + struct mhi_dev_ring *ring; + int rc = 0; + + for (; chintr_value; ch_num++, chintr_value >>= 1) { + if (chintr_value & 1) { + ring = &mhi->ring[ch_num + mhi->ch_ring_start]; + if (ring->state == RING_STATE_UINT) { + pr_debug("Channel not opened for %d\n", ch_num); + break; + } + mhi_ring_set_state(ring, RING_STATE_PENDING); + list_add(&ring->list, &mhi->process_ring_list); + rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num); + if (rc) { + pr_err("Error disabling chdb\n"); + return; + } + queue_work(mhi->pending_ring_wq, &mhi->pending_work); + } + } +} + +static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi) +{ + int i, rc = 0; + uint32_t chintr_value = 0, ch_num = 0; + + rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi); + if (rc) { + pr_err("Read channel db\n"); + return; + } + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + ch_num = i * MHI_MASK_CH_EV_LEN; + /* Process channel status whose mask is enabled */ + chintr_value = (mhi->chdb[i].status & mhi->chdb[i].mask); + if (chintr_value) { + mhi_log(MHI_MSG_VERBOSE, + "processing id: %d, ch interrupt 0x%x\n", + i, chintr_value); + mhi_dev_queue_channel_db(mhi, chintr_value, ch_num); + rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i), + mhi->chdb[i].status); + if (rc) { + pr_err("Error writing interrupt clear for A7\n"); + return; + } + } + } +} + +static int mhi_dev_abort(struct mhi_dev *mhi) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + int ch_id = 0, rc = 0; + char *disconnected_12[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL}; + char *disconnected_14[2] = { "MHI_CHANNEL_STATE_14=DISCONNECTED", 
NULL}; + + /* Hard stop all the channels */ + for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) { + ring = &mhi->ring[ch_id + mhi->ch_ring_start]; + if (ring->state == RING_STATE_UINT) + continue; + + ch = &mhi->ch[ch_id]; + mutex_lock(&ch->ch_lock); + mhi->ch[ch_id].state = MHI_DEV_CH_STOPPED; + mutex_unlock(&ch->ch_lock); + } + + /* Update ctrl node */ + mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_DISCONNECTED); + mhi_update_state_info(MHI_CLIENT_MBIM_OUT, MHI_STATE_DISCONNECTED); + mhi_update_state_info(MHI_CLIENT_QMI_OUT, MHI_STATE_DISCONNECTED); + rc = kobject_uevent_env(&mhi_ctx->dev->kobj, + KOBJ_CHANGE, disconnected_12); + if (rc) + pr_err("Error sending uevent:%d\n", rc); + + rc = kobject_uevent_env(&mhi_ctx->dev->kobj, + KOBJ_CHANGE, disconnected_14); + if (rc) + pr_err("Error sending uevent:%d\n", rc); + + flush_workqueue(mhi->ring_init_wq); + flush_workqueue(mhi->pending_ring_wq); + + /* Initiate MHI IPA reset */ + ipa_mhi_destroy(); + + /* Clean up initialized channels */ + rc = mhi_deinit(mhi); + if (rc) { + pr_err("Error during mhi_deinit with %d\n", rc); + return rc; + } + + rc = mhi_dev_mmio_mask_chdb_interrupts(mhi_ctx); + if (rc) { + pr_err("Failed to enable channel db\n"); + return rc; + } + + rc = mhi_dev_mmio_disable_ctrl_interrupt(mhi_ctx); + if (rc) { + pr_err("Failed to enable control interrupt\n"); + return rc; + } + + rc = mhi_dev_mmio_disable_cmdb_interrupt(mhi_ctx); + if (rc) { + pr_err("Failed to enable command db\n"); + return rc; + } + + + atomic_set(&mhi_ctx->re_init_done, 0); + + mhi_log(MHI_MSG_INFO, + "Register a PCIe callback during re-init\n"); + mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP; + mhi_ctx->event_reg.user = mhi_ctx; + mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK; + mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up; + mhi_ctx->event_reg.options = MHI_REINIT; + + rc = ep_pcie_register_event(mhi_ctx->phandle, + &mhi_ctx->event_reg); + if (rc) { + pr_err("Failed to register for events 
from PCIe\n"); + return rc; + } + + /* Set RESET field to 0 */ + mhi_dev_mmio_reset(mhi_ctx); + + return rc; +} + +static void mhi_dev_transfer_completion_cb(void *mreq) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_client *client; + union mhi_dev_ring_element_type *el; + int rc = 0; + struct mhi_req *req = (struct mhi_req *)mreq; + union mhi_dev_ring_element_type *compl_ev = NULL; + struct mhi_dev *mhi = NULL; + unsigned long flags; + size_t transfer_len; + u32 snd_cmpl; + uint32_t rd_offset; + + client = req->client; + ch = client->channel; + mhi = ch->ring->mhi_dev; + el = req->el; + transfer_len = req->len; + snd_cmpl = req->snd_cmpl; + rd_offset = req->rd_offset; + ch->curr_ereq->context = ch; + + dma_unmap_single(&mhi_ctx->pdev->dev, req->dma, + req->len, DMA_FROM_DEVICE); + + /* Trigger client call back */ + req->client_cb(req); + + if (el->tre.ieot) { + compl_ev = ch->curr_ereq->tr_events + ch->curr_ereq->num_events; + compl_ev->evt_tr_comp.chid = ch->ch_id; + compl_ev->evt_tr_comp.type = + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT; + compl_ev->evt_tr_comp.len = transfer_len; + compl_ev->evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT; + compl_ev->evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase + + rd_offset * TR_RING_ELEMENT_SZ; + ch->curr_ereq->num_events++; + + if (ch->curr_ereq->num_events >= MAX_TR_EVENTS || snd_cmpl) { + mhi_log(MHI_MSG_VERBOSE, + "num of tr events %d for ch %d\n", + ch->curr_ereq->num_events, ch->ch_id); + rc = mhi_dev_send_multiple_tr_events(mhi, + mhi->ch_ctx_cache[ch->ch_id].err_indx, + ch->curr_ereq, (ch->curr_ereq->num_events* + sizeof(union mhi_dev_ring_element_type))); + if (rc) + mhi_log(MHI_MSG_ERROR, + "failed to send compl evts\n"); + if (!list_empty(&ch->event_req_buffers)) { + ch->curr_ereq = + container_of(ch->event_req_buffers.next, + struct event_req, list); + spin_lock_irqsave(&mhi->lock, flags); + list_del_init(&ch->curr_ereq->list); + spin_unlock_irqrestore(&mhi->lock, flags); + ch->curr_ereq->num_events = 0; + } 
else + pr_err("%s evt req buffers empty\n", __func__); + } + } else + mhi_log(MHI_MSG_ERROR, "ieot is not valid\n"); + + if (ch->state == MHI_DEV_CH_PENDING_STOP) { + ch->state = MHI_DEV_CH_STOPPED; + rc = mhi_dev_process_stop_cmd(ch->ring, ch->ch_id, mhi_ctx); + if (rc) + mhi_log(MHI_MSG_ERROR, + "Error while stopping channel (%d)\n", ch->ch_id); + } +} + +static void mhi_dev_scheduler(struct work_struct *work) +{ + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, chdb_ctrl_work); + int rc = 0; + uint32_t int_value = 0; + struct mhi_dev_ring *ring; + enum mhi_dev_state state; + enum mhi_dev_event event = 0; + bool mhi_reset = false; + uint32_t bhi_imgtxdb = 0; + + mutex_lock(&mhi_ctx->mhi_lock); + /* Check for interrupts */ + mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value); + + if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) { + mhi_log(MHI_MSG_VERBOSE, + "processing ctrl interrupt with %d\n", int_value); + rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + mutex_unlock(&mhi_ctx->mhi_lock); + return; + } + + if (mhi_reset) { + mhi_log(MHI_MSG_VERBOSE, + "processing mhi device reset\n"); + rc = mhi_dev_abort(mhi); + if (rc) + pr_err("device reset failed:%d\n", rc); + mutex_unlock(&mhi_ctx->mhi_lock); + queue_work(mhi->ring_init_wq, &mhi->re_init); + return; + } + + rc = mhi_dev_get_event_notify(state, &event); + if (rc) { + pr_err("unsupported state :%d\n", state); + goto fail; + } + + rc = mhi_dev_notify_sm_event(event); + if (rc) { + pr_err("error sending SM event\n"); + goto fail; + } + + rc = mhi_dev_mmio_read(mhi, BHI_IMGTXDB, &bhi_imgtxdb); + mhi_log(MHI_MSG_VERBOSE, + "BHI_IMGTXDB = 0x%x\n", bhi_imgtxdb); + } + + if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) { + mhi_log(MHI_MSG_VERBOSE, + "processing cmd db interrupt with %d\n", int_value); + ring = &mhi->ring[MHI_RING_CMD_ID]; + ring->state = RING_STATE_PENDING; + queue_work(mhi->pending_ring_wq, &mhi->pending_work); + } + 
+ /* get the specific channel interrupts */ + mhi_dev_check_channel_interrupt(mhi); + +fail: + mutex_unlock(&mhi_ctx->mhi_lock); + + if (mhi->config_iatu || mhi->mhi_int) + enable_irq(mhi->mhi_irq); + else + ep_pcie_mask_irq_event(mhi->phandle, + EP_PCIE_INT_EVT_MHI_A7, true); +} + +void mhi_dev_notify_a7_event(struct mhi_dev *mhi) +{ + + if (!atomic_read(&mhi->mhi_dev_wake)) { + pm_stay_awake(mhi->dev); + atomic_set(&mhi->mhi_dev_wake, 1); + } + mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock\n"); + + schedule_work(&mhi->chdb_ctrl_work); + mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n"); +} +EXPORT_SYMBOL(mhi_dev_notify_a7_event); + +static irqreturn_t mhi_dev_isr(int irq, void *dev_id) +{ + struct mhi_dev *mhi = dev_id; + + if (!atomic_read(&mhi->mhi_dev_wake)) { + pm_stay_awake(mhi->dev); + atomic_set(&mhi->mhi_dev_wake, 1); + mhi_log(MHI_MSG_VERBOSE, "acquiring mhi wakelock in ISR\n"); + } + + disable_irq_nosync(mhi->mhi_irq); + schedule_work(&mhi->chdb_ctrl_work); + mhi_log(MHI_MSG_VERBOSE, "mhi irq triggered\n"); + + return IRQ_HANDLED; +} + +int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi) +{ + struct ep_pcie_iatu control, data; + struct ep_pcie_iatu entries[MHI_HOST_REGION_NUM]; + + data.start = mhi->data_base.device_pa; + data.end = mhi->data_base.device_pa + mhi->data_base.size - 1; + data.tgt_lower = HOST_ADDR_LSB(mhi->data_base.host_pa); + data.tgt_upper = HOST_ADDR_MSB(mhi->data_base.host_pa); + + control.start = mhi->ctrl_base.device_pa; + control.end = mhi->ctrl_base.device_pa + mhi->ctrl_base.size - 1; + control.tgt_lower = HOST_ADDR_LSB(mhi->ctrl_base.host_pa); + control.tgt_upper = HOST_ADDR_MSB(mhi->ctrl_base.host_pa); + + entries[0] = data; + entries[1] = control; + + return ep_pcie_config_outbound_iatu(mhi_ctx->phandle, entries, + MHI_HOST_REGION_NUM); +} +EXPORT_SYMBOL(mhi_dev_config_outbound_iatu); + +static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi) +{ + int rc = 0; + struct platform_device *pdev; + uint64_t addr1 = 0; + struct 
mhi_addr data_transfer; + + pdev = mhi->pdev; + + /* Get host memory region configuration */ + mhi_dev_get_mhi_addr(mhi); + + mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb, + mhi->host_addr.ctrl_base_msb); + mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb, + mhi->host_addr.data_base_msb); + + addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb, + mhi->host_addr.ctrl_limit_msb); + mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa; + addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb, + mhi->host_addr.data_limit_msb); + mhi->data_base.size = addr1 - mhi->data_base.host_pa; + + if (mhi->config_iatu) { + if (mhi->ctrl_base.host_pa > mhi->data_base.host_pa) { + mhi->data_base.device_pa = mhi->device_local_pa_base; + mhi->ctrl_base.device_pa = mhi->device_local_pa_base + + mhi->ctrl_base.host_pa - mhi->data_base.host_pa; + } else { + mhi->ctrl_base.device_pa = mhi->device_local_pa_base; + mhi->data_base.device_pa = mhi->device_local_pa_base + + mhi->data_base.host_pa - mhi->ctrl_base.host_pa; + } + + if (!mhi->use_ipa) { + mhi->ctrl_base.device_va = + (uintptr_t) devm_ioremap_nocache(&pdev->dev, + mhi->ctrl_base.device_pa, + mhi->ctrl_base.size); + if (!mhi->ctrl_base.device_va) { + pr_err("io remap failed for mhi address\n"); + return -EINVAL; + } + } + } + + if (mhi->config_iatu) { + rc = mhi_dev_config_outbound_iatu(mhi); + if (rc) { + pr_err("Configuring iATU failed\n"); + return rc; + } + } + + /* Get Channel, event and command context base pointer */ + rc = mhi_dev_mmio_get_chc_base(mhi); + if (rc) { + pr_err("Fetching channel context failed\n"); + return rc; + } + + rc = mhi_dev_mmio_get_erc_base(mhi); + if (rc) { + pr_err("Fetching event ring context failed\n"); + return rc; + } + + rc = mhi_dev_mmio_get_crc_base(mhi); + if (rc) { + pr_err("Fetching command ring context failed\n"); + return rc; + } + + rc = mhi_dev_update_ner(mhi); + if (rc) { + pr_err("Fetching NER failed\n"); + return rc; + } + + mhi->cmd_ctx_shadow.size = 
sizeof(struct mhi_dev_cmd_ctx); + mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings; + mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels; + + mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_cmd_ctx), + &mhi->cmd_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->cmd_ctx_cache) { + pr_err("no memory while allocating cmd ctx\n"); + return -ENOMEM; + } + memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx)); + + mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings, + &mhi->ev_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->ev_ctx_cache) + return -ENOMEM; + memset(mhi->ev_ctx_cache, 0, sizeof(struct mhi_dev_ev_ctx) * + mhi->cfg.event_rings); + + mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev, + sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels, + &mhi->ch_ctx_cache_dma_handle, + GFP_KERNEL); + if (!mhi->ch_ctx_cache) + return -ENOMEM; + memset(mhi->ch_ctx_cache, 0, sizeof(struct mhi_dev_ch_ctx) * + mhi->cfg.channels); + + if (mhi->use_ipa) { + data_transfer.phy_addr = mhi->cmd_ctx_cache_dma_handle; + data_transfer.host_pa = mhi->cmd_ctx_shadow.host_pa; + } + + data_transfer.size = mhi->cmd_ctx_shadow.size; + + /* Cache the command and event context */ + mhi_dev_read_from_host(mhi, &data_transfer); + + if (mhi->use_ipa) { + data_transfer.phy_addr = mhi->ev_ctx_cache_dma_handle; + data_transfer.host_pa = mhi->ev_ctx_shadow.host_pa; + } + + data_transfer.size = mhi->ev_ctx_shadow.size; + + mhi_dev_read_from_host(mhi, &data_transfer); + + mhi_log(MHI_MSG_VERBOSE, + "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n", + mhi->cmd_ctx_cache->rbase, + mhi->cmd_ctx_cache->rp, + mhi->cmd_ctx_cache->wp); + mhi_log(MHI_MSG_VERBOSE, + "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n", + mhi_ctx->ev_ctx_cache->rbase, + mhi->ev_ctx_cache->rp, + mhi->ev_ctx_cache->wp); + + return mhi_ring_start(&mhi->ring[0], + (union mhi_dev_ring_ctx 
*)mhi->cmd_ctx_cache, mhi); +} + +int mhi_dev_suspend(struct mhi_dev *mhi) +{ + int ch_id = 0, rc = 0; + struct mhi_addr data_transfer; + + mutex_lock(&mhi_ctx->mhi_write_test); + atomic_set(&mhi->is_suspended, 1); + + for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) { + if (mhi->ch_ctx_cache[ch_id].ch_state != + MHI_DEV_CH_STATE_RUNNING) + continue; + + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED; + + if (mhi->use_ipa) { + data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } else { + data_transfer.device_va = mhi->ch_ctx_shadow.device_va + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } + + data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state); + data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state; + + /* update the channel state in the host */ + mhi_dev_write_to_host(mhi, &data_transfer, NULL, + MHI_DEV_DMA_SYNC); + + } + + atomic_set(&mhi->mhi_dev_wake, 0); + pm_relax(mhi->dev); + mhi_log(MHI_MSG_VERBOSE, "releasing mhi wakelock\n"); + + mutex_unlock(&mhi_ctx->mhi_write_test); + + return rc; +} +EXPORT_SYMBOL(mhi_dev_suspend); + +int mhi_dev_resume(struct mhi_dev *mhi) +{ + int ch_id = 0, rc = 0; + struct mhi_addr data_transfer; + + for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) { + if (mhi->ch_ctx_cache[ch_id].ch_state != + MHI_DEV_CH_STATE_SUSPENDED) + continue; + + mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING; + if (mhi->use_ipa) { + data_transfer.host_pa = mhi->ch_ctx_shadow.host_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } else { + data_transfer.device_va = mhi->ch_ctx_shadow.device_va + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + data_transfer.device_pa = mhi->ch_ctx_shadow.device_pa + + sizeof(struct mhi_dev_ch_ctx) * ch_id; + } + + data_transfer.size = sizeof(enum mhi_dev_ch_ctx_state); + data_transfer.virt_addr = &mhi->ch_ctx_cache[ch_id].ch_state; + + /* 
update the channel state in the host */ + mhi_dev_write_to_host(mhi, &data_transfer, NULL, + MHI_DEV_DMA_SYNC); + } + mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED); + + atomic_set(&mhi->is_suspended, 0); + + return rc; +} +EXPORT_SYMBOL(mhi_dev_resume); + +static int mhi_dev_ring_init(struct mhi_dev *dev) +{ + int i = 0; + + mhi_log(MHI_MSG_INFO, "initializing all rings"); + dev->cmd_ring_idx = 0; + dev->ev_ring_start = 1; + dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings; + + /* Initialize CMD ring */ + mhi_ring_init(&dev->ring[dev->cmd_ring_idx], + RING_TYPE_CMD, dev->cmd_ring_idx); + + mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx], + mhi_dev_process_cmd_ring); + + /* Initialize Event ring */ + for (i = dev->ev_ring_start; i < (dev->cfg.event_rings + + dev->ev_ring_start); i++) + mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i); + + /* Initialize CH */ + for (i = dev->ch_ring_start; i < (dev->cfg.channels + + dev->ch_ring_start); i++) { + mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i); + mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring); + } + + return 0; +} + +int mhi_dev_open_channel(uint32_t chan_id, + struct mhi_dev_client **handle_client, + void (*mhi_dev_client_cb_reason) + (struct mhi_dev_client_cb_reason *cb)) +{ + int rc = 0; + int i = 0; + struct mhi_dev_channel *ch; + struct platform_device *pdev; + + pdev = mhi_ctx->pdev; + ch = &mhi_ctx->ch[chan_id]; + + mutex_lock(&ch->ch_lock); + + if (ch->active_client) { + mhi_log(MHI_MSG_ERROR, + "Channel (%d) already opened by client\n", chan_id); + rc = -EINVAL; + goto exit; + } + + /* Initialize the channel, client and state information */ + *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL); + if (!(*handle_client)) { + dev_err(&pdev->dev, "can not allocate mhi_dev memory\n"); + rc = -ENOMEM; + goto exit; + } + + /* Pre allocate event requests */ + ch->ereqs = kcalloc(MHI_MAX_EVT_REQ, sizeof(*ch->ereqs), GFP_KERNEL); + if (!ch->ereqs) { + rc = -ENOMEM; + 
goto free_client; + } + /* pre allocate buffers to queue transfer completion events */ + ch->tr_events = kcalloc(MHI_MAX_EVT_REQ, + MAX_TR_EVENTS * sizeof(*ch->tr_events), + GFP_KERNEL); + if (!ch->tr_events) { + rc = -ENOMEM; + goto free_ereqs; + } + + /* + * Organize the above allocated event request block and + * completion event block into linked lists. Each event + * request includes a pointer to a block of MAX_TR_EVENTS + * completion events. + */ + INIT_LIST_HEAD(&mhi_ctx->ch[chan_id].event_req_buffers); + for (i = 0; i < MHI_MAX_EVT_REQ; ++i) { + ch->ereqs[i].tr_events = ch->tr_events + i * MAX_TR_EVENTS; + list_add_tail(&ch->ereqs[i].list, + &mhi_ctx->ch[chan_id].event_req_buffers); + } + mhi_ctx->ch[chan_id].curr_ereq = + container_of(mhi_ctx->ch[chan_id].event_req_buffers.next, + struct event_req, list); + list_del_init(&mhi_ctx->ch[chan_id].curr_ereq->list); + + ch->active_client = (*handle_client); + (*handle_client)->channel = ch; + (*handle_client)->event_trigger = mhi_dev_client_cb_reason; + + if (ch->state == MHI_DEV_CH_UNINT) { + ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start]; + ch->state = MHI_DEV_CH_PENDING_START; + } else if (ch->state == MHI_DEV_CH_CLOSED) + ch->state = MHI_DEV_CH_STARTED; + else if (ch->state == MHI_DEV_CH_STOPPED) + ch->state = MHI_DEV_CH_PENDING_START; + + goto exit; + +free_ereqs: + kfree(ch->ereqs); + ch->ereqs = NULL; +free_client: + kfree(*handle_client); +exit: + mutex_unlock(&ch->ch_lock); + return rc; +} +EXPORT_SYMBOL(mhi_dev_open_channel); + +int mhi_dev_channel_isempty(struct mhi_dev_client *handle) +{ + struct mhi_dev_channel *ch; + int rc; + + ch = handle->channel; + + rc = ch->ring->rd_offset == ch->ring->wr_offset; + + return rc; +} +EXPORT_SYMBOL(mhi_dev_channel_isempty); + +int mhi_dev_close_channel(struct mhi_dev_client *handle) +{ + struct mhi_dev_channel *ch; + int rc = 0; + + ch = handle->channel; + + mutex_lock(&ch->ch_lock); + if (ch->state != MHI_DEV_CH_PENDING_START) { + if (ch->ch_type 
== MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL && + !mhi_dev_channel_isempty(handle)) { + mhi_log(MHI_MSG_ERROR, + "Trying to close an active channel (%d)\n", + ch->ch_id); + rc = -EAGAIN; + goto exit; + } else if (ch->tre_loc) { + mhi_log(MHI_MSG_ERROR, + "Trying to close channel (%d) when a TRE is active", + ch->ch_id); + rc = -EAGAIN; + goto exit; + } + } + + ch->state = MHI_DEV_CH_CLOSED; + ch->active_client = NULL; + kfree(ch->ereqs); + kfree(ch->tr_events); + ch->ereqs = NULL; + ch->tr_events = NULL; + kfree(handle); +exit: + mutex_unlock(&ch->ch_lock); + return rc; +} +EXPORT_SYMBOL(mhi_dev_close_channel); + +static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch, + struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el, + uint32_t *chain) +{ + uint32_t td_done = 0; + + /* + * A full TRE worth of data was consumed. + * Check if we are at a TD boundary. + */ + if (ch->tre_bytes_left == 0) { + if (el->tre.chain) { + if (el->tre.ieob) + mhi_dev_send_completion_event(ch, + ring->rd_offset, el->tre.len, + MHI_CMD_COMPL_CODE_EOB); + *chain = 1; + } else { + if (el->tre.ieot) + mhi_dev_send_completion_event( + ch, ring->rd_offset, el->tre.len, + MHI_CMD_COMPL_CODE_EOT); + td_done = 1; + *chain = 0; + } + mhi_dev_ring_inc_index(ring, ring->rd_offset); + ch->tre_bytes_left = 0; + ch->tre_loc = 0; + } + + return td_done; +} + +int mhi_dev_read_channel(struct mhi_req *mreq) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + union mhi_dev_ring_element_type *el; + size_t bytes_to_read, addr_offset; + uint64_t read_from_loc; + ssize_t bytes_read = 0; + size_t write_to_loc = 0; + uint32_t usr_buf_remaining; + int td_done = 0, rc = 0; + struct mhi_dev_client *handle_client; + + if (WARN_ON(!mreq)) + return -ENXIO; + + if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) { + pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info); + return -ENODEV; + } + + if (!mreq->client) { + mhi_log(MHI_MSG_ERROR, "invalid mhi request\n"); + return -ENXIO; + } + 
handle_client = mreq->client; + ch = handle_client->channel; + usr_buf_remaining = mreq->len; + ring = ch->ring; + mreq->chain = 0; + + mutex_lock(&ch->ch_lock); + + do { + el = &ring->ring_cache[ring->rd_offset]; + mhi_log(MHI_MSG_VERBOSE, "evtptr : 0x%llx\n", + el->tre.data_buf_ptr); + mhi_log(MHI_MSG_VERBOSE, "evntlen : 0x%x, offset:%d\n", + el->tre.len, ring->rd_offset); + + if (ch->tre_loc) { + bytes_to_read = min(usr_buf_remaining, + ch->tre_bytes_left); + mreq->chain = 1; + mhi_log(MHI_MSG_VERBOSE, + "remaining buffered data size %d\n", + (int) ch->tre_bytes_left); + } else { + if (ring->rd_offset == ring->wr_offset) { + mhi_log(MHI_MSG_VERBOSE, + "nothing to read, returning\n"); + bytes_read = 0; + goto exit; + } + + if (ch->state == MHI_DEV_CH_STOPPED) { + mhi_log(MHI_MSG_VERBOSE, + "channel (%d) already stopped\n", + mreq->chan); + bytes_read = -1; + goto exit; + } + + ch->tre_loc = el->tre.data_buf_ptr; + ch->tre_size = el->tre.len; + ch->tre_bytes_left = ch->tre_size; + + mhi_log(MHI_MSG_VERBOSE, + "user_buf_remaining %d, ch->tre_size %d\n", + usr_buf_remaining, ch->tre_size); + bytes_to_read = min(usr_buf_remaining, ch->tre_size); + } + + bytes_read += bytes_to_read; + addr_offset = ch->tre_size - ch->tre_bytes_left; + read_from_loc = ch->tre_loc + addr_offset; + write_to_loc = (uint32_t) mreq->buf + + (mreq->len - usr_buf_remaining); + ch->tre_bytes_left -= bytes_to_read; + mreq->el = el; + mreq->actual_len = bytes_read; + mreq->rd_offset = ring->rd_offset; + mhi_log(MHI_MSG_VERBOSE, "reading %d bytes from chan %d\n", + bytes_to_read, mreq->chan); + rc = mhi_transfer_host_to_device((void *) write_to_loc, + read_from_loc, bytes_to_read, mhi_ctx, mreq); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error while reading chan (%d) rc %d\n", + mreq->chan, rc); + mutex_unlock(&ch->ch_lock); + return rc; + } + usr_buf_remaining -= bytes_to_read; + + if (mreq->mode == IPA_DMA_ASYNC) { + ch->tre_bytes_left = 0; + ch->tre_loc = 0; + goto exit; + } else { + td_done = 
mhi_dev_check_tre_bytes_left(ch, ring, + el, &mreq->chain); + } + } while (usr_buf_remaining && !td_done); + if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) { + ch->state = MHI_DEV_CH_STOPPED; + rc = mhi_dev_process_stop_cmd(ring, mreq->chan, mhi_ctx); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error while stopping channel (%d)\n", + mreq->chan); + bytes_read = -EIO; + } + } +exit: + mutex_unlock(&ch->ch_lock); + return bytes_read; +} +EXPORT_SYMBOL(mhi_dev_read_channel); + +static void skip_to_next_td(struct mhi_dev_channel *ch) +{ + struct mhi_dev_ring *ring = ch->ring; + union mhi_dev_ring_element_type *el; + uint32_t td_boundary_reached = 0; + + ch->skip_td = 1; + el = &ring->ring_cache[ring->rd_offset]; + while (ring->rd_offset != ring->wr_offset) { + if (td_boundary_reached) { + ch->skip_td = 0; + break; + } + if (!el->tre.chain) + td_boundary_reached = 1; + mhi_dev_ring_inc_index(ring, ring->rd_offset); + el = &ring->ring_cache[ring->rd_offset]; + } +} + +int mhi_dev_write_channel(struct mhi_req *wreq) +{ + struct mhi_dev_channel *ch; + struct mhi_dev_ring *ring; + struct mhi_dev_client *handle_client; + union mhi_dev_ring_element_type *el; + enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID; + int rc = 0; + uint64_t skip_tres = 0, write_to_loc; + size_t read_from_loc; + uint32_t usr_buf_remaining; + size_t usr_buf_offset = 0; + size_t bytes_to_write = 0; + size_t bytes_written = 0; + uint32_t tre_len = 0, suspend_wait_timeout = 0; + + if (WARN_ON(!wreq || !wreq->client || !wreq->buf)) { + pr_err("%s: invalid parameters\n", __func__); + return -ENXIO; + } + + if (mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) { + pr_err("Channel not connected:%d\n", mhi_ctx->ctrl_info); + return -ENODEV; + } + + usr_buf_remaining = wreq->len; + mutex_lock(&mhi_ctx->mhi_write_test); + + if (atomic_read(&mhi_ctx->is_suspended)) { + /* + * Expected usage is when there is a write + * to the MHI core -> notify SM. 
+ */ + rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP); + if (rc) { + pr_err("error sending core wakeup event\n"); + mutex_unlock(&mhi_ctx->mhi_write_test); + return rc; + } + } + + while (atomic_read(&mhi_ctx->is_suspended) && + suspend_wait_timeout < MHI_WAKEUP_TIMEOUT_CNT) { + /* wait for the suspend to finish */ + msleep(MHI_SUSPEND_MIN); + suspend_wait_timeout++; + } + + if (suspend_wait_timeout >= MHI_WAKEUP_TIMEOUT_CNT || + mhi_ctx->ctrl_info != MHI_STATE_CONNECTED) { + pr_err("Failed to wake up core\n"); + mutex_unlock(&mhi_ctx->mhi_write_test); + return -ENODEV; + } + + handle_client = wreq->client; + ch = handle_client->channel; + ch->wr_request_active = true; + + ring = ch->ring; + + mutex_lock(&ch->ch_lock); + + if (ch->state == MHI_DEV_CH_STOPPED) { + mhi_log(MHI_MSG_ERROR, + "channel %d already stopped\n", wreq->chan); + bytes_written = -1; + goto exit; + } + + if (ch->state == MHI_DEV_CH_PENDING_STOP) { + if (mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx) < 0) + bytes_written = -1; + goto exit; + } + + if (ch->skip_td) + skip_to_next_td(ch); + + do { + if (ring->rd_offset == ring->wr_offset) { + mhi_log(MHI_MSG_ERROR, + "%s():rd & wr offsets are equal\n", + __func__); + mhi_log(MHI_MSG_INFO, "No TREs available\n"); + break; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = el->tre.len; + if (wreq->len > tre_len) { + pr_err("%s(): rlen = %d, tlen = %d: client buf > tre len\n", + __func__, wreq->len, tre_len); + bytes_written = -ENOMEM; + goto exit; + } + + bytes_to_write = min(usr_buf_remaining, tre_len); + usr_buf_offset = wreq->len - bytes_to_write; + read_from_loc = (uint32_t) wreq->buf + usr_buf_offset; + write_to_loc = el->tre.data_buf_ptr; + wreq->rd_offset = ring->rd_offset; + wreq->el = el; + rc = mhi_transfer_device_to_host(write_to_loc, + (void *) read_from_loc, + bytes_to_write, + mhi_ctx, wreq); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error while writing chan (%d) rc %d\n", + wreq->chan, rc); + goto exit; + } + 
bytes_written += bytes_to_write; + usr_buf_remaining -= bytes_to_write; + + if (usr_buf_remaining) { + if (!el->tre.chain) + code = MHI_CMD_COMPL_CODE_OVERFLOW; + else if (el->tre.ieob) + code = MHI_CMD_COMPL_CODE_EOB; + } else { + if (el->tre.chain) + skip_tres = 1; + code = MHI_CMD_COMPL_CODE_EOT; + } + if (wreq->mode == IPA_DMA_SYNC) { + rc = mhi_dev_send_completion_event(ch, + ring->rd_offset, bytes_to_write, code); + if (rc) + mhi_log(MHI_MSG_VERBOSE, + "err in snding cmpl evt ch:%d\n", + wreq->chan); + mhi_dev_ring_inc_index(ring, ring->rd_offset); + } + + if (ch->state == MHI_DEV_CH_PENDING_STOP) + break; + + } while (!skip_tres && usr_buf_remaining); + + if (skip_tres) + skip_to_next_td(ch); + + if (ch->state == MHI_DEV_CH_PENDING_STOP) { + rc = mhi_dev_process_stop_cmd(ring, wreq->chan, mhi_ctx); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "channel %d stop failed\n", wreq->chan); + } + } +exit: + ch->wr_request_active = false; + mutex_unlock(&ch->ch_lock); + mutex_unlock(&mhi_ctx->mhi_write_test); + return bytes_written; +} +EXPORT_SYMBOL(mhi_dev_write_channel); + +static void mhi_dev_enable(struct work_struct *work) +{ + int rc = 0; + struct ep_pcie_msi_config msi_cfg; + struct mhi_dev *mhi = container_of(work, + struct mhi_dev, ring_init_cb_work); + bool mhi_reset; + enum mhi_dev_state state; + uint32_t max_cnt = 0, bhi_intvec = 0; + + if (mhi->use_ipa) { + rc = ipa_dma_init(); + if (rc) { + pr_err("ipa dma init failed\n"); + return; + } + + rc = ipa_dma_enable(); + if (rc) { + pr_err("ipa enable failed\n"); + return; + } + } + + rc = mhi_dev_ring_init(mhi); + if (rc) { + pr_err("MHI dev ring init failed\n"); + return; + } + + /*Enable MHI dev network stack Interface*/ + rc = mhi_dev_net_interface_init(); + if (rc) + pr_err("%s Failed to initialize mhi_dev_net iface\n", __func__); + + rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec); + if (rc) + return; + + if (bhi_intvec != 0xffffffff) { + /* Indicate the host that the device is ready */ + rc = 
ep_pcie_get_msi_config(mhi->phandle, &msi_cfg); + if (!rc) { + rc = ep_pcie_trigger_msi(mhi_ctx->phandle, bhi_intvec); + if (rc) { + pr_err("%s: error sending msi\n", __func__); + return; + } + } else { + pr_err("MHI: error geting msi configs\n"); + } + } + + rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + return; + } + + while (state != MHI_DEV_M0_STATE && max_cnt < MHI_SUSPEND_TIMEOUT) { + /* Wait for Host to set the M0 state */ + msleep(MHI_SUSPEND_MIN); + rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset); + if (rc) { + pr_err("%s: get mhi state failed\n", __func__); + return; + } + max_cnt++; + } + + mhi_log(MHI_MSG_INFO, "state:%d\n", state); + + if (state == MHI_DEV_M0_STATE) { + rc = mhi_dev_cache_host_cfg(mhi); + if (rc) { + pr_err("Failed to cache the host config\n"); + return; + } + + rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE); + if (rc) { + pr_err("%s: env setting failed\n", __func__); + return; + } + } else { + pr_err("MHI device failed to enter M0\n"); + return; + } + + rc = mhi_hwc_init(mhi_ctx); + if (rc) { + pr_err("error during hwc_init\n"); + return; + } + + if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) { + mhi_ctx->mhi_int_en = true; + enable_irq(mhi_ctx->mhi_irq); + } + + mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED); +} + +static void mhi_ring_init_cb(void *data) +{ + struct mhi_dev *mhi = data; + + if (WARN_ON(!mhi)) + return; + + queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work); +} + +int mhi_register_state_cb(void (*mhi_state_cb) + (struct mhi_dev_client_cb_data *cb_data), + void *data, enum mhi_client_channel channel) +{ + struct mhi_dev_ready_cb_info *cb_info = NULL; + + if (WARN_ON(!mhi_ctx)) + return -ENXIO; + + if (channel > MHI_MAX_CHANNELS) { + pr_err("Invalid channel :%d\n", channel); + return -EINVAL; + } + + mutex_lock(&mhi_ctx->mhi_lock); + cb_info = kmalloc(sizeof(*cb_info), GFP_KERNEL); + if (!cb_info) { + 
mutex_unlock(&mhi_ctx->mhi_lock); + return -ENOMEM; + } + + cb_info->cb = mhi_state_cb; + cb_info->cb_data.user_data = data; + cb_info->cb_data.channel = channel; + + list_add_tail(&cb_info->list, &mhi_ctx->client_cb_list); + + /** + * If channel is open during registration, no callback is issued. + * Instead return -EEXIST to notify the client. Clients request + * is added to the list to notify future state change notification. + * Channel struct may not be allocated yet if this function is called + * early during boot - add an explicit check for non-null "ch". + */ + if (mhi_ctx->ch && (mhi_ctx->ch[channel].state == MHI_DEV_CH_STARTED)) { + mutex_unlock(&mhi_ctx->mhi_lock); + return -EEXIST; + } + + mutex_unlock(&mhi_ctx->mhi_lock); + + return 0; +} +EXPORT_SYMBOL(mhi_register_state_cb); + +static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info) +{ + struct mhi_dev_client_cb_reason reason; + + if (uevent_idx == MHI_DEV_UEVENT_CTRL) + mhi_ctx->ctrl_info = info; + + channel_state_info[uevent_idx].ctrl_info = info; + + if (uevent_idx == MHI_CLIENT_QMI_OUT || + uevent_idx == MHI_CLIENT_QMI_IN) { + /* For legacy reasons for QTI client */ + reason.reason = MHI_DEV_CTRL_UPDATE; + uci_ctrl_update(&reason); + } + +} + +int mhi_ctrl_state_info(uint32_t idx, uint32_t *info) +{ + if (idx == MHI_DEV_UEVENT_CTRL) + *info = mhi_ctx->ctrl_info; + else + if (idx < MHI_MAX_CHANNELS) + *info = channel_state_info[idx].ctrl_info; + else + return -EINVAL; + + mhi_log(MHI_MSG_VERBOSE, "idx:%d, ctrl:%d", idx, *info); + + return 0; +} +EXPORT_SYMBOL(mhi_ctrl_state_info); + +static int get_device_tree_data(struct platform_device *pdev) +{ + struct mhi_dev *mhi; + int rc = 0; + struct resource *res_mem = NULL; + + mhi = devm_kzalloc(&pdev->dev, + sizeof(struct mhi_dev), GFP_KERNEL); + if (!mhi) + return -ENOMEM; + + mhi->pdev = pdev; + mhi->dev = &pdev->dev; + res_mem = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "mhi_mmio_base"); + if (!res_mem) { + 
	pr_err("Request MHI MMIO physical memory region failed\n");
		return -EINVAL;
	}

	/* Map the first MHI_1K_SIZE bytes of the MMIO register space */
	mhi->mmio_base_pa_addr = res_mem->start;
	mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
	if (!mhi->mmio_base_addr) {
		pr_err("Failed to IO map MMIO registers\n");
		return -EINVAL;
	}

	/* IPA micro-controller mailbox doorbell regions (physical only) */
	res_mem = platform_get_resource_byname(pdev,
				IORESOURCE_MEM, "ipa_uc_mbox_crdb");
	if (!res_mem) {
		pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
		rc = -EINVAL;
		goto err;
	}

	mhi->ipa_uc_mbox_crdb = res_mem->start;

	res_mem = platform_get_resource_byname(pdev,
				IORESOURCE_MEM, "ipa_uc_mbox_erdb");
	if (!res_mem) {
		pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
		rc = -EINVAL;
		goto err;
	}

	mhi->ipa_uc_mbox_erdb = res_mem->start;
	/* Publish the context globally before reading DT into it */
	mhi_ctx = mhi;

	/* Mandatory DT properties: PCIe interface id, EP MSI, MHI version */
	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-ifc-id",
				&mhi_ctx->ifc_id);
	if (rc) {
		pr_err("qcom,mhi-ifc-id does not exist\n");
		goto err;
	}

	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-ep-msi",
				&mhi_ctx->mhi_ep_msi_num);
	if (rc) {
		pr_err("qcom,mhi-ep-msi does not exist\n");
		goto err;
	}

	rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-version",
				&mhi_ctx->mhi_version);
	if (rc) {
		pr_err("qcom,mhi-version does not exist\n");
		goto err;
	}

	/* Optional feature flags (absent property => false) */
	mhi_ctx->use_ipa = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-ipa-software-channel");

	mhi_ctx->config_iatu = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,mhi-config-iatu");

	/* Local PA base is mandatory only when iATU configuration is used */
	if (mhi_ctx->config_iatu) {
		rc = of_property_read_u32((&pdev->dev)->of_node,
				"qcom,mhi-local-pa-base",
				&mhi_ctx->device_local_pa_base);
		if (rc) {
			pr_err("qcom,mhi-local-pa-base does not exist\n");
			goto err;
		}
	}

	mhi_ctx->mhi_int = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,mhi-interrupt");

	/* Device-side interrupt line, needed for iATU or MHI-interrupt mode */
	if (mhi->config_iatu || mhi_ctx->mhi_int) {
		mhi->mhi_irq = platform_get_irq_byname(pdev, "mhi-device-inta");
		if (mhi->mhi_irq < 0) {
	pr_err("Invalid MHI device interrupt\n");
			rc = mhi->mhi_irq;
			goto err;
		}
	}

	device_init_wakeup(mhi->dev, true);
	/* MHI device will be woken up from PCIe event */
	device_set_wakeup_capable(mhi->dev, false);
	/* Hold a wakelock until completion of M0 */
	pm_stay_awake(mhi->dev);
	atomic_set(&mhi->mhi_dev_wake, 1);

	mhi_log(MHI_MSG_VERBOSE, "acquiring wakelock\n");

	return 0;
err:
	/* Undo the ioremap done earlier in this function */
	iounmap(mhi->mmio_base_addr);
	return rc;
}

/*
 * Tear down everything mhi_init() set up: free cached ring copies,
 * destroy per-channel locks, release devm allocations and exit the
 * state machine. Rings still in RING_STATE_UINT were never cached,
 * so they are skipped.
 */
static int mhi_deinit(struct mhi_dev *mhi)
{
	int i = 0, ring_id = 0;
	struct mhi_dev_ring *ring;
	struct platform_device *pdev = mhi->pdev;

	/* Total rings = channel rings + event rings + 1 command ring */
	ring_id = mhi->cfg.channels + mhi->cfg.event_rings + 1;

	for (i = 0; i < ring_id; i++) {
		ring = &mhi->ring[i];
		if (ring->state == RING_STATE_UINT)
			continue;

		dma_free_coherent(mhi->dev, ring->ring_size *
			sizeof(union mhi_dev_ring_element_type),
			ring->ring_cache,
			ring->ring_cache_dma_handle);
	}

	for (i = 0; i < mhi->cfg.channels; i++)
		mutex_destroy(&mhi->ch[i].ch_lock);

	devm_kfree(&pdev->dev, mhi->mmio_backup);
	devm_kfree(&pdev->dev, mhi->ch);
	devm_kfree(&pdev->dev, mhi->ring);

	mhi_dev_sm_exit(mhi);

	mhi->mmio_initialized = false;

	return 0;
}

/*
 * Allocate and initialize the per-ring and per-channel bookkeeping
 * based on the configuration read from MMIO (mhi->cfg).
 *
 * Returns 0 on success or a negative errno; allocations are devm-managed,
 * so partial failures are cleaned up with the device.
 */
static int mhi_init(struct mhi_dev *mhi)
{
	int rc = 0, i = 0;
	struct platform_device *pdev = mhi->pdev;

	/* Populate mhi->cfg (channel/event-ring counts) from MMIO */
	rc = mhi_dev_mmio_init(mhi);
	if (rc) {
		pr_err("Failed to update the MMIO init\n");
		return rc;
	}

	/* One descriptor per channel ring + event ring + command ring */
	mhi->ring = devm_kzalloc(&pdev->dev,
			(sizeof(struct mhi_dev_ring) *
			(mhi->cfg.channels + mhi->cfg.event_rings + 1)),
			GFP_KERNEL);
	if (!mhi->ring)
		return -ENOMEM;

	mhi->ch = devm_kzalloc(&pdev->dev,
			(sizeof(struct mhi_dev_channel) *
			(mhi->cfg.channels)), GFP_KERNEL);
	if (!mhi->ch)
		return -ENOMEM;

	for (i = 0; i < mhi->cfg.channels; i++)
		mutex_init(&mhi->ch[i].ch_lock);

	spin_lock_init(&mhi->lock);
	/* Scratch area for saving/restoring the MMIO range across suspend */
	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
			MHI_DEV_MMIO_RANGE, GFP_KERNEL);
	if (!mhi->mmio_backup)
		return -ENOMEM;

	return 0;
}
+static int mhi_dev_resume_mmio_mhi_reinit(struct mhi_dev *mhi_ctx) +{ + int rc = 0; + + mutex_lock(&mhi_ctx->mhi_lock); + if (atomic_read(&mhi_ctx->re_init_done)) { + mhi_log(MHI_MSG_INFO, "Re_init done, return\n"); + mutex_unlock(&mhi_ctx->mhi_lock); + return 0; + } + + rc = mhi_init(mhi_ctx); + if (rc) { + pr_err("Error initializing MHI MMIO with %d\n", rc); + goto fail; + } + + mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT | + EP_PCIE_EVENT_PM_D3_COLD | + EP_PCIE_EVENT_PM_D0 | + EP_PCIE_EVENT_PM_RST_DEAST | + EP_PCIE_EVENT_MHI_A7 | + EP_PCIE_EVENT_LINKDOWN; + mhi_ctx->event_reg.user = mhi_ctx; + mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK; + mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler; + + rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg); + if (rc) { + pr_err("Failed to register for events from PCIe\n"); + goto fail; + } + + rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx); + if (rc < 0) { + if (rc == -EEXIST) { + mhi_ring_init_cb(mhi_ctx); + } else { + pr_err("Error calling IPA cb with %d\n", rc); + goto fail; + } + } + + /* Invoke MHI SM when device is in RESET state */ + rc = mhi_dev_sm_init(mhi_ctx); + if (rc) { + pr_err("%s: Error during SM init\n", __func__); + goto fail; + } + + /* set the env before setting the ready bit */ + rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE); + if (rc) { + pr_err("%s: env setting failed\n", __func__); + goto fail; + } + + /* All set, notify the host */ + rc = mhi_dev_sm_set_ready(); + if (rc) { + pr_err("%s: unable to set ready bit\n", __func__); + goto fail; + } + + atomic_set(&mhi_ctx->is_suspended, 0); +fail: + atomic_set(&mhi_ctx->re_init_done, 1); + mutex_unlock(&mhi_ctx->mhi_lock); + return rc; +} + +static void mhi_dev_reinit(struct work_struct *work) +{ + struct mhi_dev *mhi_ctx = container_of(work, + struct mhi_dev, re_init); + enum ep_pcie_link_status link_state; + int rc = 0; + + link_state = ep_pcie_get_linkstatus(mhi_ctx->phandle); + if (link_state == 
EP_PCIE_LINK_ENABLED) { + /* PCIe link is up with BME set */ + rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx); + if (rc) { + pr_err("Failed to register for events from PCIe\n"); + return; + } + } + + mhi_log(MHI_MSG_VERBOSE, "Wait for PCIe linkup\n"); +} + +static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx) +{ + struct platform_device *pdev; + int rc = 0; + + pdev = mhi_ctx->pdev; + + INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler); + + mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq", + WQ_HIGHPRI, 0); + if (!mhi_ctx->pending_ring_wq) { + rc = -ENOMEM; + return rc; + } + + INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending); + + INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable); + + INIT_WORK(&mhi_ctx->re_init, mhi_dev_reinit); + + mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq", + WQ_HIGHPRI, 0); + if (!mhi_ctx->ring_init_wq) { + rc = -ENOMEM; + return rc; + } + + INIT_LIST_HEAD(&mhi_ctx->event_ring_list); + INIT_LIST_HEAD(&mhi_ctx->process_ring_list); + mutex_init(&mhi_ctx->mhi_event_lock); + mutex_init(&mhi_ctx->mhi_write_test); + + rc = mhi_init(mhi_ctx); + if (rc) + return rc; + + mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 4), + &mhi_ctx->cache_dma_handle, GFP_KERNEL); + if (!mhi_ctx->dma_cache) + return -ENOMEM; + + mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 4), + &mhi_ctx->read_dma_handle, + GFP_KERNEL); + if (!mhi_ctx->read_handle) + return -ENOMEM; + + mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev, + (TRB_MAX_DATA_SIZE * 24), + &mhi_ctx->write_dma_handle, + GFP_KERNEL); + if (!mhi_ctx->write_handle) + return -ENOMEM; + + rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version); + if (rc) { + pr_err("Failed to update the MHI version\n"); + return rc; + } + + mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id); + if (!mhi_ctx->phandle) { + pr_err("PCIe driver get handle failed.\n"); + return -EINVAL; + } + + 
mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT | + EP_PCIE_EVENT_PM_D3_COLD | + EP_PCIE_EVENT_PM_D0 | + EP_PCIE_EVENT_PM_RST_DEAST | + EP_PCIE_EVENT_MHI_A7 | + EP_PCIE_EVENT_LINKDOWN; + mhi_ctx->event_reg.user = mhi_ctx; + mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK; + mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler; + + rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg); + if (rc) { + pr_err("Failed to register for events from PCIe\n"); + return rc; + } + + pr_err("Registering with IPA\n"); + + rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx); + if (rc < 0) { + if (rc == -EEXIST) { + mhi_ring_init_cb(mhi_ctx); + } else { + pr_err("Error calling IPA cb with %d\n", rc); + return rc; + } + } + + /* Invoke MHI SM when device is in RESET state */ + rc = mhi_dev_sm_init(mhi_ctx); + if (rc) { + pr_err("%s: Error during SM init\n", __func__); + return rc; + } + + /* set the env before setting the ready bit */ + rc = mhi_dev_mmio_set_env(mhi_ctx, MHI_ENV_VALUE); + if (rc) { + pr_err("%s: env setting failed\n", __func__); + return rc; + } + + /* All set, notify the host */ + mhi_dev_sm_set_ready(); + + if (mhi_ctx->config_iatu || mhi_ctx->mhi_int) { + rc = devm_request_irq(&pdev->dev, mhi_ctx->mhi_irq, mhi_dev_isr, + IRQF_TRIGGER_HIGH, "mhi_isr", mhi_ctx); + if (rc) { + dev_err(&pdev->dev, "request mhi irq failed %d\n", rc); + return -EINVAL; + } + + disable_irq(mhi_ctx->mhi_irq); + } + + return 0; +} + +static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify) +{ + if (!notify || !notify->user) { + pr_err("Null argument for notify\n"); + return; + } + + mhi_ctx = notify->user; + mhi_dev_pcie_notify_event = notify->options; + mhi_log(MHI_MSG_INFO, + "PCIe event=0x%x\n", notify->options); + queue_work(mhi_ctx->pcie_event_wq, &mhi_ctx->pcie_event); +} + +static void mhi_dev_pcie_handle_event(struct work_struct *work) +{ + struct mhi_dev *mhi_ctx = container_of(work, struct mhi_dev, + pcie_event); + int rc = 0; + + 
if (mhi_dev_pcie_notify_event == MHI_INIT) { + rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx); + if (rc) { + pr_err("Error during MHI device initialization\n"); + return; + } + } else if (mhi_dev_pcie_notify_event == MHI_REINIT) { + rc = mhi_dev_resume_mmio_mhi_reinit(mhi_ctx); + if (rc) { + pr_err("Error during MHI device re-initialization\n"); + return; + } + } +} + +static int mhi_dev_probe(struct platform_device *pdev) +{ + int rc = 0; + + if (pdev->dev.of_node) { + rc = get_device_tree_data(pdev); + if (rc) { + pr_err("Error reading MHI Dev DT\n"); + return rc; + } + mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, + "mhi", 0); + if (mhi_ipc_log == NULL) { + dev_err(&pdev->dev, + "Failed to create IPC logging context\n"); + } + /* + * The below list and mutex should be initialized + * before calling mhi_uci_init to avoid crash in + * mhi_register_state_cb when accessing these. + */ + INIT_LIST_HEAD(&mhi_ctx->client_cb_list); + mutex_init(&mhi_ctx->mhi_lock); + + mhi_uci_init(); + mhi_update_state_info(MHI_DEV_UEVENT_CTRL, + MHI_STATE_CONFIGURED); + } + + INIT_WORK(&mhi_ctx->pcie_event, mhi_dev_pcie_handle_event); + mhi_ctx->pcie_event_wq = alloc_workqueue("mhi_dev_pcie_event_wq", + WQ_HIGHPRI, 0); + if (!mhi_ctx->pcie_event_wq) { + pr_err("no memory\n"); + rc = -ENOMEM; + return rc; + } + + mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id); + if (mhi_ctx->phandle) { + /* PCIe link is already up */ + rc = mhi_dev_resume_mmio_mhi_init(mhi_ctx); + if (rc) { + pr_err("Error during MHI device initialization\n"); + return rc; + } + } else { + pr_debug("Register a PCIe callback\n"); + mhi_ctx->event_reg.events = EP_PCIE_EVENT_LINKUP; + mhi_ctx->event_reg.user = mhi_ctx; + mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK; + mhi_ctx->event_reg.callback = mhi_dev_resume_init_with_link_up; + mhi_ctx->event_reg.options = MHI_INIT; + + rc = ep_pcie_register_event(mhi_ctx->phandle, + &mhi_ctx->event_reg); + if (rc) { + pr_err("Failed to register for events from 
PCIe\n"); + return rc; + } + } + + return 0; +} + +static int mhi_dev_remove(struct platform_device *pdev) +{ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id mhi_dev_match_table[] = { + { .compatible = "qcom,msm-mhi-dev" }, + {} +}; + +static struct platform_driver mhi_dev_driver = { + .driver = { + .name = "qcom,msm-mhi-dev", + .of_match_table = mhi_dev_match_table, + }, + .probe = mhi_dev_probe, + .remove = mhi_dev_remove, +}; + +module_param(mhi_msg_lvl, uint, 0644); +module_param(mhi_ipc_msg_lvl, uint, 0644); + +MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl"); +MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl"); + +static int __init mhi_dev_init(void) +{ + return platform_driver_register(&mhi_dev_driver); +} +module_init(mhi_dev_init); + +static void __exit mhi_dev_exit(void) +{ + platform_driver_unregister(&mhi_dev_driver); +} +module_exit(mhi_dev_exit); + +MODULE_DESCRIPTION("MHI device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h new file mode 100644 index 0000000000000000000000000000000000000000..6cb2d7d270d3b4339e550391c122474d6327ebc3 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi.h @@ -0,0 +1,1090 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MHI_H +#define __MHI_H + +#include +#include +#include + +/** + * MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings. + */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t 
msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring 
elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 100 + +/*maximum trasnfer completion events 
buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element tre; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + 
SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; + +struct mhi_dev_channel; + +struct mhi_dev_ring { + struct list_head list; + struct mhi_dev *mhi_dev; + + uint32_t id; + uint32_t rd_offset; + uint32_t wr_offset; + uint32_t ring_size; + + enum mhi_dev_ring_type type; + enum mhi_dev_ring_state state; + + /* device virtual address location of the cached host ring ctx data */ + union mhi_dev_ring_element_type *ring_cache; + /* Physical address of the cached ring copy on the device side */ + dma_addr_t ring_cache_dma_handle; + /* Physical address of the host where we will write/read to/from */ + struct mhi_addr ring_shadow; + /* Ring type - cmd, event, transfer ring and its rp/wp... */ + union mhi_dev_ring_ctx *ring_ctx; + /* ring_ctx_shadow -> tracking ring_ctx in the host */ + union mhi_dev_ring_ctx *ring_ctx_shadow; + void (*ring_cb)(struct mhi_dev *dev, + union mhi_dev_ring_element_type *el, + void *ctx); +}; + +static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring, + uint32_t rd_offset) +{ + ring->rd_offset++; + if (ring->rd_offset == ring->ring_size) + ring->rd_offset = 0; +} + +/* trace information planned to use for read/write */ +#define TRACE_DATA_MAX 128 +#define MHI_DEV_DATA_MAX 512 + +#define MHI_DEV_MMIO_RANGE 0xc80 + +struct ring_cache_req { + struct completion *done; + void *context; +}; + +struct event_req { + union mhi_dev_ring_element_type *tr_events; + u32 num_events; + dma_addr_t dma; + u32 dma_len; + dma_addr_t event_rd_dma; + void *context; + enum mhi_dev_tr_compl_evt_type event_type; + u32 event_ring; + void (*client_cb)(void *req); + struct list_head list; +}; + +struct mhi_dev_channel { + struct list_head list; + struct list_head clients; + /* synchronization for changing channel state, + * adding/removing clients, mhi_dev callbacks, etc + */ + struct mhi_dev_ring *ring; + + enum mhi_dev_channel_state state; + uint32_t ch_id; + enum mhi_dev_ch_ctx_type ch_type; + struct mutex 
ch_lock; + /* client which the current inbound/outbound message is for */ + struct mhi_dev_client *active_client; + /* + * Pointer to event request structs used to temporarily store + * completion events and meta data before sending them to host + */ + struct event_req *ereqs; + /* Pointer to completion event buffers */ + union mhi_dev_ring_element_type *tr_events; + struct list_head event_req_buffers; + struct event_req *curr_ereq; + + /* current TRE being processed */ + uint64_t tre_loc; + /* current TRE size */ + uint32_t tre_size; + /* tre bytes left to read/write */ + uint32_t tre_bytes_left; + /* td size being read/written from/to so far */ + uint32_t td_size; + bool wr_request_active; + bool skip_td; +}; + +/* Structure device for mhi dev */ +struct mhi_dev { + struct platform_device *pdev; + struct device *dev; + /* MHI MMIO related members */ + phys_addr_t mmio_base_pa_addr; + void *mmio_base_addr; + phys_addr_t ipa_uc_mbox_crdb; + phys_addr_t ipa_uc_mbox_erdb; + + uint32_t *mmio_backup; + struct mhi_config cfg; + bool mmio_initialized; + + spinlock_t lock; + /* Host control base information */ + struct mhi_host_addr host_addr; + struct mhi_addr ctrl_base; + struct mhi_addr data_base; + struct mhi_addr ch_ctx_shadow; + struct mhi_dev_ch_ctx *ch_ctx_cache; + dma_addr_t ch_ctx_cache_dma_handle; + struct mhi_addr ev_ctx_shadow; + struct mhi_dev_ch_ctx *ev_ctx_cache; + dma_addr_t ev_ctx_cache_dma_handle; + + struct mhi_addr cmd_ctx_shadow; + struct mhi_dev_ch_ctx *cmd_ctx_cache; + dma_addr_t cmd_ctx_cache_dma_handle; + struct mhi_dev_ring *ring; + int mhi_irq; + struct mhi_dev_channel *ch; + + int ctrl_int; + int cmd_int; + /* CHDB and EVDB device interrupt state */ + struct mhi_interrupt_state chdb[4]; + struct mhi_interrupt_state evdb[4]; + + /* Scheduler work */ + struct work_struct chdb_ctrl_work; + + struct mutex mhi_lock; + struct mutex mhi_event_lock; + + /* process a ring element */ + struct workqueue_struct *pending_ring_wq; + struct work_struct 
pending_work; + + struct list_head event_ring_list; + struct list_head process_ring_list; + + uint32_t cmd_ring_idx; + uint32_t ev_ring_start; + uint32_t ch_ring_start; + + /* IPA Handles */ + u32 ipa_clnt_hndl[4]; + struct workqueue_struct *ring_init_wq; + struct work_struct ring_init_cb_work; + struct work_struct re_init; + + /* EP PCIe registration */ + struct workqueue_struct *pcie_event_wq; + struct ep_pcie_register_event event_reg; + u32 ifc_id; + struct ep_pcie_hw *phandle; + struct work_struct pcie_event; + struct ep_pcie_msi_config msi_cfg; + + atomic_t write_active; + atomic_t is_suspended; + atomic_t mhi_dev_wake; + atomic_t re_init_done; + struct mutex mhi_write_test; + u32 device_local_pa_base; + u32 mhi_ep_msi_num; + u32 mhi_version; + void *dma_cache; + void *read_handle; + void *write_handle; + /* Physical scratch buffer for writing control data to the host */ + dma_addr_t cache_dma_handle; + /* + * Physical scratch buffer address used when picking host data + * from the host used in mhi_read() + */ + dma_addr_t read_dma_handle; + /* + * Physical scratch buffer address used when writing to the host + * region from device used in mhi_write() + */ + dma_addr_t write_dma_handle; + + /* Use IPA DMA for Software channel data transfer */ + bool use_ipa; + + /* iATU is required to map control and data region */ + bool config_iatu; + + /* MHI state info */ + enum mhi_ctrl_info ctrl_info; + + /*Register for interrupt*/ + bool mhi_int; + bool mhi_int_en; + /* Registered client callback list */ + struct list_head client_cb_list; + + struct kobj_uevent_env kobj_env; +}; + + +enum mhi_msg_level { + MHI_MSG_VERBOSE = 0x0, + MHI_MSG_INFO = 0x1, + MHI_MSG_DBG = 0x2, + MHI_MSG_WARNING = 0x3, + MHI_MSG_ERROR = 0x4, + MHI_MSG_CRITICAL = 0x5, + MHI_MSG_reserved = 0x80000000 +}; + +extern enum mhi_msg_level mhi_msg_lvl; +extern enum mhi_msg_level mhi_ipc_msg_lvl; +extern void *mhi_ipc_log; + +#define mhi_log(_msg_lvl, _msg, ...) 
do { \ + if (_msg_lvl >= mhi_msg_lvl) { \ + pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ + } \ + if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \ + ipc_log_string(mhi_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ + } \ +} while (0) + + +/* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 used for internal only */ +#define MHI_DEV_UEVENT_CTRL 0 + +struct mhi_dev_uevent_info { + enum mhi_client_channel channel; + enum mhi_ctrl_info ctrl_info; +}; + +struct mhi_dev_iov { + void *addr; + uint32_t buf_size; +}; + + +struct mhi_dev_trace { + unsigned int timestamp; + uint32_t data[TRACE_DATA_MAX]; +}; + +/* MHI Ring related functions */ + +/** + * mhi_ring_init() - Initializes the Ring id to the default un-initialized + * state. Once a start command is received, the respective ring + * is then prepared by fetching the context and updating the + * offset. + * @ring: Ring for the respective context - Channel/Event/Command. + * @type: Command/Event or Channel transfer ring. + * @id: Index to the ring id. For command its usually 1, Event rings + * may vary from 1 to 128. Channels vary from 1 to 256. + */ +void mhi_ring_init(struct mhi_dev_ring *ring, + enum mhi_dev_ring_type type, int id); + +/** + * mhi_ring_start() - Fetches the respective transfer ring's context from + * the host and updates the write offset. + * @ring: Ring for the respective context - Channel/Event/Command. + * @ctx: Transfer ring of type mhi_dev_ring_ctx. + * @dev: MHI device structure. + */ +int mhi_ring_start(struct mhi_dev_ring *ring, + union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi); + +/** + * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally. + * @ring: Ring for the respective context - Channel/Event/Command. + * @wr_offset: Cache the TRE's upto the write offset value. + */ +int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset); + +/** + * mhi_dev_update_wr_offset() - Check for any updates in the write offset. 
+ * @ring: Ring for the respective context - Channel/Event/Command. + */ +int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring); + +/** + * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements + * and invoke the clients callback. + * @ring: Ring for the respective context - Channel/Event/Command. + */ +int mhi_dev_process_ring(struct mhi_dev_ring *ring); + +/** + * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the + * clients callback. + * @ring: Ring for the respective context - Channel/Event/Command. + * @offset: Offset index into the respective ring's cache element. + */ +int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset); + +/** + * mhi_dev_add_element() - Copy the element to the respective transfer rings + * read pointer and increment the index. + * @ring: Ring for the respective context - Channel/Event/Command. + * @element: Transfer ring element to be copied to the host memory. + */ +int mhi_dev_add_element(struct mhi_dev_ring *ring, + union mhi_dev_ring_element_type *element, + struct event_req *ereq, int evt_offset); +/** + * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data + * from device to the host. + * @dst_pa: Physical destination address. + * @src: Source virtual address. + * @len: Numer of bytes to be transferred. + * @mhi: MHI dev structure. + * @req: mhi_req structure + */ +int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len, + struct mhi_dev *mhi, struct mhi_req *req); + +/** + * mhi_transfer_host_to_dev() - memcpy equivalent API to transfer data + * from host to the device. + * @dst: Physical destination virtual address. + * @src_pa: Source physical address. + * @len: Numer of bytes to be transferred. + * @mhi: MHI dev structure. 
+ * @req: mhi_req structure + */ +int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len, + struct mhi_dev *mhi, struct mhi_req *mreq); + +/** + * mhi_dev_write_to_host() - Transfer data from device to host. + * Based on support available, either IPA DMA or memcpy is used. + * @host: Host and device address details. + * @buf: Data buffer that needs to be written to the host. + * @size: Data buffer size. + */ +void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *mhi_transfer, + struct event_req *ereq, enum mhi_dev_transfer_type type); +/** + * mhi_dev_read_from_host() - memcpy equivalent API to transfer data + * from host to device. + * @host: Host and device address details. + * @buf: Data buffer that needs to be read from the host. + * @size: Data buffer size. + */ +void mhi_dev_read_from_host(struct mhi_dev *mhi, + struct mhi_addr *mhi_transfer); + +/** + * mhi_dev_read_from_host() - memcpy equivalent API to transfer data + * from host to device. + * @host: Host and device address details. + * @buf: Data buffer that needs to be read from the host. + * @size: Data buffer size. + */ + +void mhi_ring_set_cb(struct mhi_dev_ring *ring, + void (*ring_cb)(struct mhi_dev *dev, + union mhi_dev_ring_element_type *el, void *ctx)); + +/** + * mhi_ring_set_state() - Sets internal state of the ring for tracking whether + * a ring is being processed, idle or uninitialized. + * @ring: Ring for the respective context - Channel/Event/Command. + * @state: state of type mhi_dev_ring_state. + */ +void mhi_ring_set_state(struct mhi_dev_ring *ring, + enum mhi_dev_ring_state state); + +/** + * mhi_ring_get_state() - Obtains the internal state of the ring. + * @ring: Ring for the respective context - Channel/Event/Command. + */ +enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring); + +/* MMIO related functions */ + +/** + * mhi_dev_mmio_read() - Generic MHI MMIO register read API. + * @dev: MHI device structure. 
+ * @offset: MHI address offset from base. + * @reg_val: Pointer the register value is stored to. + */ +int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset, + uint32_t *reg_value); + +/** + * mhi_dev_mmio_read() - Generic MHI MMIO register write API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @val: Value to be written to the register offset. + */ +int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset, + uint32_t val); + +/** + * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @mask: Register field mask. + * @shift: Register field mask shift value. + * @val: Value to be written to the register offset. + */ +int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t val); +/** + * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API. + * @dev: MHI device structure. + * @offset: MHI address offset from base. + * @mask: Register field mask. + * @shift: Register field mask shift value. + * @reg_val: Pointer the register value is stored to. + */ +int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t *reg_val); +/** + * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt. + * @dev: MHI device structure. + */ + +int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt. + * @dev: MHI device structure. 
+ */ +int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_cmdb_interrupt() - Read Command doorbell status. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given + * channel id. + * @dev: MHI device structure. + * @chdb_id: Channel id number. + */ +int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id); +/** + * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given + * channel id. + * @dev: MHI device structure. + * @chdb_id: Channel id number. + */ +int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id); + +/** + * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given + * event ring id. + * @dev: MHI device structure. + * @erdb_id: Event ring id number. + */ +int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id); + +/** + * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given + * event ring id. + * @dev: MHI device structure. + * @erdb_id: Event ring id number. + */ +int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id); + +/** + * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_chdb_interrupts() - Read all Channel doorbell + * interrupts. + * @dev: MHI device structure. 
+ */ +int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev); + +/** + *mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_read_erdb_interrupts() - Read all Event doorbell + * interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address. + @dev: MHI device structure. + */ +int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev); + +/** + * mhi_dev_get_crc_base() - Fetch the Command ring context base address. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev); + +/** + * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID. + * @dev: MHI device structure. + * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_get_erc_base() - Fetch the Write offset of the Event ring ID. + * @dev: MHI device structure. + * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_get_cmd_base() - Fetch the Write offset of the Command ring ID. + * @dev: MHI device structure. 
+ * @wr_offset: Pointer of the write offset to be written to. + */ +int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset); + +/** + * mhi_dev_mmio_set_env() - Write the Execution Enviornment. + * @dev: MHI device structure. + * @value: Value of the EXEC EVN. + */ +int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value); + +/** + * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_reset(struct mhi_dev *dev); + +/** + * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host. + * @dev: MHI device structure. + */ +int mhi_dev_get_mhi_addr(struct mhi_dev *dev); + +/** + * mhi_dev_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3. + * @dev: MHI device structure. + * @state: Pointer of type mhi_dev_state + * @mhi_reset: MHI device reset from host. + */ +int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state, + bool *mhi_reset); + +/** + * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event + * rings, support number of channels, and offsets to the Channel + * and Event doorbell from the host. + * @dev: MHI device structure. + */ +int mhi_dev_mmio_init(struct mhi_dev *dev); + +/** + * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by + * the host. + * @dev: MHI device structure. + */ +int mhi_dev_update_ner(struct mhi_dev *dev); + +/** + * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3. + * @dev: MHI device structure. + */ +int mhi_dev_restore_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_backup_mmio() - Backup MMIO before a MHI transition to M3. + * @dev: MHI device structure. + */ +int mhi_dev_backup_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug. + * @dev: MHI device structure. 
+ */ +int mhi_dev_dump_mmio(struct mhi_dev *dev); + +/** + * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation + * unit between device and host to map the Data and Control + * information. + * @dev: MHI device structure. + */ +int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi); + +/** + * mhi_dev_send_state_change_event() - Send state change event to the host + * such as M0/M1/M2/M3. + * @dev: MHI device structure. + * @state: MHI state of type mhi_dev_state + */ +int mhi_dev_send_state_change_event(struct mhi_dev *mhi, + enum mhi_dev_state state); +/** + * mhi_dev_send_ee_event() - Send Execution enviornment state change + * event to the host. + * @dev: MHI device structure. + * @state: MHI state of type mhi_dev_execenv + */ +int mhi_dev_send_ee_event(struct mhi_dev *mhi, + enum mhi_dev_execenv exec_env); +/** + * mhi_dev_syserr() - System error when unexpected events are received. + * @dev: MHI device structure. + */ +int mhi_dev_syserr(struct mhi_dev *mhi); + +/** + * mhi_dev_suspend() - MHI device suspend to stop channel processing at the + * Transfer ring boundary, update the channel state to suspended. + * @dev: MHI device structure. + */ +int mhi_dev_suspend(struct mhi_dev *mhi); + +/** + * mhi_dev_resume() - MHI device resume to update the channel state to running. + * @dev: MHI device structure. + */ +int mhi_dev_resume(struct mhi_dev *mhi); + +/** + * mhi_dev_trigger_hw_acc_wakeup() - Notify State machine there is HW + * accelerated data to be send and prevent MHI suspend. + * @dev: MHI device structure. + */ +int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi); + +/** + * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel + * context with IPA when performing a MHI resume. + * @dev: MHI device structure. 
+ */ +int mhi_pcie_config_db_routing(struct mhi_dev *mhi); + +/** + * mhi_uci_init() - Initializes the User control interface (UCI) which + * exposes device nodes for the supported MHI software + * channels. + */ +int mhi_uci_init(void); + +/** + * mhi_dev_net_interface_init() - Initializes the mhi device network interface + * which exposes the virtual network interface (mhi_dev_net0). + * data packets will transfer between MHI host interface (mhi_swip) + * and mhi_dev_net interface using software path + */ +int mhi_dev_net_interface_init(void); + +void mhi_dev_notify_a7_event(struct mhi_dev *mhi); + +void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason); + +#endif /* _MHI_H */ diff --git a/drivers/platform/msm/mhi_dev/mhi_dev_net.c b/drivers/platform/msm/mhi_dev/mhi_dev_net.c new file mode 100644 index 0000000000000000000000000000000000000000..9d87a078dd9d93edd1bd74cb0e6937eaca4a8a8e --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_dev_net.c @@ -0,0 +1,667 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +/* + * MHI Device Network interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" + +#define MHI_NET_DRIVER_NAME "mhi_dev_net_drv" +#define MHI_NET_DEV_NAME "mhi_dev_net%d" +#define MHI_NET_DEFAULT_MTU 8192 +#define MHI_NET_IPC_PAGES (100) +#define MHI_MAX_RX_REQ (128) +#define MHI_MAX_TX_REQ (128) + +enum mhi_dev_net_dbg_lvl { + MHI_VERBOSE = 0x1, + MHI_INFO = 0x2, + MHI_DBG = 0x3, + MHI_WARNING = 0x4, + MHI_ERROR = 0x5, + MHI_CRITICAL = 0x6, + MSG_NET_reserved = 0x80000000 +}; + +static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_CRITICAL; +static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_VERBOSE; +static void *mhi_net_ipc_log; + +enum mhi_chan_dir { + MHI_DIR_INVALID = 0x0, + MHI_DIR_OUT = 0x1, + MHI_DIR_IN = 0x2, + MHI_DIR__reserved = 0x80000000 +}; + +struct mhi_dev_net_chan_attr { + /* SW maintained channel id */ + enum mhi_client_channel chan_id; + /* maximum buffer size for this channel */ + size_t max_packet_size; + /* direction of the channel, see enum mhi_chan_dir */ + enum mhi_chan_dir dir; +}; + +#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2) + +#define mhi_dev_net_log(_msg_lvl, _msg, ...) 
do { \ + if (_msg_lvl >= mhi_net_msg_lvl) { \ + pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ + } \ + if (mhi_net_ipc_log && (_msg_lvl >= mhi_net_ipc_log_lvl)) { \ + ipc_log_string(mhi_net_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ + } \ +} while (0) + +module_param(mhi_net_msg_lvl, uint, 0644); +MODULE_PARM_DESC(mhi_net_msg_lvl, "mhi dev net dbg lvl"); + +module_param(mhi_net_ipc_log_lvl, uint, 0644); +MODULE_PARM_DESC(mhi_net_ipc_log_lvl, "mhi dev net dbg lvl"); + +struct mhi_dev_net_client { + /* write channel - always even*/ + u32 out_chan; + /* read channel - always odd */ + u32 in_chan; + struct mhi_dev_client *out_handle; + struct mhi_dev_client *in_handle; + /*process pendig packets */ + struct workqueue_struct *pending_pckt_wq; + struct work_struct xmit_work; + /*Read data from host work queue*/ + atomic_t rx_enabled; + atomic_t tx_enabled; + struct net_device *dev; + struct sk_buff_head tx_buffers; + struct list_head rx_buffers; + struct list_head wr_req_buffers; + struct mhi_dev_net_ctxt *net_ctxt; + /*To check write channel is empty or not*/ + spinlock_t wrt_lock; + spinlock_t rd_lock; + struct mutex in_chan_lock; + struct mutex out_chan_lock; +}; + +struct mhi_dev_net_ctxt { + struct mhi_dev_net_chan_attr chan_attr[MHI_MAX_SOFTWARE_CHANNELS]; + struct mhi_dev_net_client *client_handle; + void (*net_event_notifier)(struct mhi_dev_client_cb_reason *cb); +}; + +static struct mhi_dev_net_ctxt mhi_net_ctxt; +static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *); + +static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt) +{ + u32 channel = 0; + struct mhi_dev_net_chan_attr *chan_attrib = NULL; + + channel = MHI_CLIENT_IP_SW_4_OUT; + chan_attrib = &mhi_ctxt->chan_attr[channel]; + chan_attrib->dir = MHI_DIR_OUT; + chan_attrib->chan_id = channel; + chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE; + mhi_dev_net_log(MHI_INFO, "Write chan attributes dir %d chan_id %d\n", + chan_attrib->dir, chan_attrib->chan_id); + 
+ channel = MHI_CLIENT_IP_SW_4_IN; + chan_attrib = &mhi_ctxt->chan_attr[channel]; + chan_attrib->dir = MHI_DIR_IN; + chan_attrib->chan_id = channel; + chan_attrib->max_packet_size = TRB_MAX_DATA_SIZE; + mhi_dev_net_log(MHI_INFO, "Read chan attributes dir %d chan_id %d\n", + chan_attrib->dir, chan_attrib->chan_id); + return 0; +} + +static void mhi_dev_net_process_queue_packets(struct work_struct *work) +{ + struct mhi_dev_net_client *client = container_of(work, + struct mhi_dev_net_client, xmit_work); + unsigned long flags = 0; + int xfer_data = 0; + struct sk_buff *skb = NULL; + struct mhi_req *wreq = NULL; + + if (mhi_dev_channel_isempty(client->in_handle)) { + mhi_dev_net_log(MHI_INFO, "%s stop network xmmit\n", __func__); + netif_stop_queue(client->dev); + return; + } + while (!((skb_queue_empty(&client->tx_buffers)) || + (list_empty(&client->wr_req_buffers)))) { + spin_lock_irqsave(&client->wrt_lock, flags); + skb = skb_dequeue(&(client->tx_buffers)); + if (!skb) { + mhi_dev_net_log(MHI_INFO, + "SKB is NULL from dequeue\n"); + spin_unlock_irqrestore(&client->wrt_lock, flags); + return; + } + wreq = container_of(client->wr_req_buffers.next, + struct mhi_req, list); + list_del_init(&wreq->list); + + wreq->client = client->in_handle; + wreq->context = skb; + wreq->buf = skb->data; + wreq->len = skb->len; + wreq->chan = client->in_chan; + wreq->mode = IPA_DMA_ASYNC; + if (skb_queue_empty(&client->tx_buffers) || + list_empty(&client->wr_req_buffers)) { + wreq->snd_cmpl = 1; + } else + wreq->snd_cmpl = 0; + spin_unlock_irqrestore(&client->wrt_lock, flags); + xfer_data = mhi_dev_write_channel(wreq); + if (xfer_data <= 0) { + pr_err("%s(): Failed to write skb len %d\n", + __func__, skb->len); + kfree_skb(skb); + return; + } + client->dev->stats.tx_packets++; + + /* Check if free buffers are available*/ + if (mhi_dev_channel_isempty(client->in_handle)) { + mhi_dev_net_log(MHI_INFO, + "%s buffers are full stop xmit\n", + __func__); + netif_stop_queue(client->dev); + 
break; + } + } /* While TX queue is not empty */ +} + +static void mhi_dev_net_event_notifier(struct mhi_dev_client_cb_reason *reason) +{ + struct mhi_dev_net_client *client_handle = mhi_net_ctxt.client_handle; + + if (reason->reason == MHI_DEV_TRE_AVAILABLE) { + if (reason->ch_id % 2) { + if (netif_queue_stopped(client_handle->dev)) { + netif_wake_queue(client_handle->dev); + queue_work(client_handle->pending_pckt_wq, + &client_handle->xmit_work); + } + } else + mhi_dev_net_client_read(client_handle); + } +} + +static __be16 mhi_dev_net_eth_type_trans(struct sk_buff *skb) +{ + __be16 protocol = 0; + /* Determine L3 protocol */ + switch (skb->data[0] & 0xf0) { + case 0x40: + protocol = htons(ETH_P_IP); + break; + case 0x60: + protocol = htons(ETH_P_IPV6); + break; + default: + /* Default is QMAP */ + protocol = htons(ETH_P_MAP); + break; + } + return protocol; +} + +static void mhi_dev_net_read_completion_cb(void *req) +{ + struct mhi_dev_net_client *net_handle = + mhi_net_ctxt.client_handle; + struct mhi_req *mreq = + (struct mhi_req *)req; + struct sk_buff *skb = mreq->context; + unsigned long flags; + + skb->len = mreq->actual_len; + skb->protocol = + mhi_dev_net_eth_type_trans(skb); + skb_put(skb, mreq->actual_len); + net_handle->dev->stats.rx_packets++; + skb->dev = net_handle->dev; + netif_rx(skb); + spin_lock_irqsave(&net_handle->rd_lock, flags); + list_add_tail(&mreq->list, &net_handle->rx_buffers); + spin_unlock_irqrestore(&net_handle->rd_lock, flags); +} + +static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle) +{ + int bytes_avail = 0; + int ret_val = 0; + u32 chan = 0; + struct mhi_dev_client *client_handle = NULL; + struct mhi_req *req; + struct sk_buff *skb; + unsigned long flags; + + client_handle = mhi_handle->out_handle; + chan = mhi_handle->out_chan; + if (!atomic_read(&mhi_handle->rx_enabled)) + return -EPERM; + while (1) { + spin_lock_irqsave(&mhi_handle->rd_lock, flags); + if (list_empty(&mhi_handle->rx_buffers)) { + 
spin_unlock_irqrestore(&mhi_handle->rd_lock, flags); + break; + } + + req = container_of(mhi_handle->rx_buffers.next, + struct mhi_req, list); + list_del_init(&req->list); + spin_unlock_irqrestore(&mhi_handle->rd_lock, flags); + skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_ATOMIC); + if (skb == NULL) { + pr_err("%s(): skb alloc failed\n", __func__); + spin_lock_irqsave(&mhi_handle->rd_lock, flags); + list_add_tail(&req->list, &mhi_handle->rx_buffers); + spin_unlock_irqrestore(&mhi_handle->rd_lock, flags); + ret_val = -ENOMEM; + return ret_val; + } + + req->client = client_handle; + req->chan = chan; + req->buf = skb->data; + req->len = MHI_NET_DEFAULT_MTU; + req->context = skb; + req->mode = IPA_DMA_ASYNC; + bytes_avail = mhi_dev_read_channel(req); + + if (bytes_avail < 0) { + pr_err("Failed to read chan %d bytes_avail = %d\n", + chan, bytes_avail); + spin_lock_irqsave(&mhi_handle->rd_lock, flags); + kfree_skb(skb); + list_add_tail(&req->list, &mhi_handle->rx_buffers); + spin_unlock_irqrestore(&mhi_handle->rd_lock, flags); + ret_val = -EIO; + return 0; + } + /* no data to send to network stack, break */ + if (!bytes_avail) { + spin_lock_irqsave(&mhi_handle->rd_lock, flags); + kfree_skb(skb); + list_add_tail(&req->list, &mhi_handle->rx_buffers); + spin_unlock_irqrestore(&mhi_handle->rd_lock, flags); + return 0; + } + } + /* coming out while only in case of no data or error */ + return ret_val; + +} + +static void mhi_dev_net_write_completion_cb(void *req) +{ + struct mhi_dev_net_client *client_handle = mhi_net_ctxt.client_handle; + struct mhi_req *wreq = (struct mhi_req *)req; + struct sk_buff *skb = wreq->context; + unsigned long flags; + + kfree_skb(skb); + spin_lock_irqsave(&client_handle->wrt_lock, flags); + list_add_tail(&wreq->list, &client_handle->wr_req_buffers); + spin_unlock_irqrestore(&client_handle->wrt_lock, flags); +} + +static int mhi_dev_net_alloc_write_reqs(struct mhi_dev_net_client *client) +{ + int nreq = 0, rc = 0; + struct mhi_req *wreq; + + while 
(nreq < MHI_MAX_TX_REQ) { + wreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC); + if (!wreq) + return -ENOMEM; + wreq->client_cb = mhi_dev_net_write_completion_cb; + list_add_tail(&wreq->list, &client->wr_req_buffers); + nreq++; + } + mhi_dev_net_log(MHI_INFO, + "mhi write reqs allocation success\n"); + return rc; + +} + +static int mhi_dev_net_alloc_read_reqs(struct mhi_dev_net_client *client) +{ + int nreq = 0, rc = 0; + struct mhi_req *mreq; + + while (nreq < MHI_MAX_RX_REQ) { + mreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC); + if (!mreq) + return -ENOMEM; + mreq->len = TRB_MAX_DATA_SIZE; + mreq->client_cb = mhi_dev_net_read_completion_cb; + list_add_tail(&mreq->list, &client->rx_buffers); + nreq++; + } + mhi_dev_net_log(MHI_INFO, + "mhi read reqs allocation success\n"); + return rc; + +} + +static int mhi_dev_net_open(struct net_device *dev) +{ + struct mhi_dev_net_client *mhi_dev_net_ptr = + *(struct mhi_dev_net_client **)netdev_priv(dev); + mhi_dev_net_log(MHI_INFO, + "mhi_net_dev interface is up for IN %d OUT %d\n", + mhi_dev_net_ptr->out_chan, + mhi_dev_net_ptr->in_chan); + netif_start_queue(dev); + return 0; +} + +static netdev_tx_t mhi_dev_net_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_dev_net_client *mhi_dev_net_ptr = + *(struct mhi_dev_net_client **)netdev_priv(dev); + unsigned long flags; + + if (skb->len <= 0) { + mhi_dev_net_log(MHI_ERROR, + "Invalid skb received freeing skb\n"); + kfree_skb(skb); + return NETDEV_TX_OK; + } + spin_lock_irqsave(&mhi_dev_net_ptr->wrt_lock, flags); + skb_queue_tail(&(mhi_dev_net_ptr->tx_buffers), skb); + spin_unlock_irqrestore(&mhi_dev_net_ptr->wrt_lock, flags); + + queue_work(mhi_dev_net_ptr->pending_pckt_wq, + &mhi_dev_net_ptr->xmit_work); + + return NETDEV_TX_OK; +} + +static int mhi_dev_net_stop(struct net_device *dev) +{ + netif_stop_queue(dev); + mhi_dev_net_log(MHI_VERBOSE, "mhi_dev_net interface is down\n"); + return 0; +} + +static int mhi_dev_net_change_mtu(struct net_device *dev, 
int new_mtu) +{ + if (0 > new_mtu || MHI_NET_DEFAULT_MTU < new_mtu) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops mhi_dev_net_ops_ip = { + .ndo_open = mhi_dev_net_open, + .ndo_stop = mhi_dev_net_stop, + .ndo_start_xmit = mhi_dev_net_xmit, + .ndo_change_mtu = mhi_dev_net_change_mtu, +}; + +static void mhi_dev_net_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_dev_net_ops_ip; + ether_setup(dev); + + /* set this after calling ether_setup */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->mtu = MHI_NET_DEFAULT_MTU; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); +} + +static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr) +{ + int ret = 0; + struct mhi_dev_net_client **mhi_dev_net_ctxt = NULL; + struct net_device *netdev; + + if (!mhi_dev_net_ptr) + return -EINVAL; + + /* Initialize skb list head to queue the packets for mhi dev client */ + skb_queue_head_init(&(mhi_dev_net_ptr->tx_buffers)); + + mhi_dev_net_log(MHI_INFO, + "mhi_dev_net interface registration\n"); + netdev = alloc_netdev(sizeof(struct mhi_dev_net_client), + MHI_NET_DEV_NAME, NET_NAME_PREDICTABLE, + mhi_dev_net_setup); + if (!netdev) { + pr_err("Failed to allocate netdev for mhi_dev_net\n"); + goto net_dev_alloc_fail; + } + + mhi_dev_net_ctxt = netdev_priv(netdev); + mhi_dev_net_ptr->dev = netdev; + *mhi_dev_net_ctxt = mhi_dev_net_ptr; + ret = register_netdev(mhi_dev_net_ptr->dev); + if (ret) { + pr_err("Failed to register mhi_dev_net device\n"); + goto net_dev_reg_fail; + } + mhi_dev_net_log(MHI_INFO, "Successfully registred mhi_dev_net\n"); + return 0; + +net_dev_reg_fail: + free_netdev(mhi_dev_net_ptr->dev); +net_dev_alloc_fail: + mhi_dev_close_channel(mhi_dev_net_ptr->in_handle); + mhi_dev_close_channel(mhi_dev_net_ptr->out_handle); + mhi_dev_net_ptr->dev = NULL; + return -ENOMEM; +} + +static int mhi_dev_net_open_channels(struct mhi_dev_net_client *client) +{ + int rc = 0; + int 
ret = 0;
+	struct list_head *cp, *q;
+	struct mhi_req *mreq;
+
+	mhi_dev_net_log(MHI_DBG, "opening OUT %d IN %d channels\n",
+			client->out_chan,
+			client->in_chan);
+	mutex_lock(&client->out_chan_lock);
+	mutex_lock(&client->in_chan_lock);
+	mhi_dev_net_log(MHI_DBG,
+			"Initializing inbound chan %d.\n",
+			client->in_chan);
+
+	rc = mhi_dev_open_channel(client->out_chan, &client->out_handle,
+			mhi_net_ctxt.net_event_notifier);
+	if (rc < 0) {
+		mhi_dev_net_log(MHI_ERROR,
+			"Failed to open chan %d, ret 0x%x\n",
+			client->out_chan, rc);
+		goto handle_not_rdy_err;
+	} else
+		atomic_set(&client->rx_enabled, 1);
+
+	rc = mhi_dev_open_channel(client->in_chan, &client->in_handle,
+			mhi_net_ctxt.net_event_notifier);
+	if (rc < 0) {
+		mhi_dev_net_log(MHI_ERROR,
+			"Failed to open chan %d, ret 0x%x\n",
+			client->in_chan, rc);
+		goto handle_in_err;
+	} else
+		atomic_set(&client->tx_enabled, 1);
+
+	mutex_unlock(&client->in_chan_lock);
+	mutex_unlock(&client->out_chan_lock);
+	mhi_dev_net_log(MHI_INFO, "IN %d, OUT %d channels are opened",
+			client->in_chan, client->out_chan);
+
+	INIT_LIST_HEAD(&client->rx_buffers);
+	INIT_LIST_HEAD(&client->wr_req_buffers);
+	/* pre allocate read request buffer */
+
+	ret = mhi_dev_net_alloc_read_reqs(client);
+	if (ret) {
+		pr_err("failed to allocate rx req buffers\n");
+		goto rx_req_failed;
+	}
+	ret = mhi_dev_net_alloc_write_reqs(client);
+	if (ret) {
+		pr_err("failed to allocate write req buffers\n");
+		goto tx_req_failed;
+	}
+	if (atomic_read(&client->tx_enabled)) {
+		ret = mhi_dev_net_enable_iface(client);
+		if (ret < 0)
+			mhi_dev_net_log(MHI_ERROR,
+				"failed to enable mhi_dev_net iface\n");
+	}
+	return ret;
+tx_req_failed:
+	list_for_each_safe(cp, q, &client->rx_buffers) {
+		mreq = list_entry(cp, struct mhi_req, list);
+		list_del(cp);
+		kfree(mreq);
+	}
+rx_req_failed:
+	mhi_dev_close_channel(client->in_handle);
+handle_in_err:
+	mhi_dev_close_channel(client->out_handle);
+handle_not_rdy_err:
+	mutex_unlock(&client->in_chan_lock);
+	
mutex_unlock(&client->out_chan_lock); + return rc; +} + +static int mhi_dev_net_close(void) +{ + struct mhi_dev_net_client *client; + + mhi_dev_net_log(MHI_INFO, + "mhi_dev_net module is removed\n"); + client = mhi_net_ctxt.client_handle; + mhi_dev_close_channel(client->out_handle); + mhi_dev_close_channel(client->in_handle); + atomic_set(&client->tx_enabled, 0); + atomic_set(&client->rx_enabled, 0); + if (client->dev != NULL) { + netif_stop_queue(client->dev); + unregister_netdev(client->dev); + free_netdev(client->dev); + client->dev = NULL; + } + /* freeing mhi client and IPC context */ + kfree(client); + kfree(mhi_net_ipc_log); + return 0; +} + +static int mhi_dev_net_rgstr_client(struct mhi_dev_net_client *client, int idx) +{ + client->out_chan = idx; + client->in_chan = idx + 1; + mutex_init(&client->in_chan_lock); + mutex_init(&client->out_chan_lock); + spin_lock_init(&client->wrt_lock); + spin_lock_init(&client->rd_lock); + mhi_dev_net_log(MHI_INFO, "Registering out %d, In %d channels\n", + client->out_chan, client->in_chan); + + /* Open IN and OUT channels for Network client*/ + mhi_dev_net_open_channels(client); + return 0; +} + +int mhi_dev_net_interface_init(void) +{ + int ret_val = 0; + int index = 0; + struct mhi_dev_net_client *mhi_net_client = NULL; + + mhi_net_client = kzalloc(sizeof(struct mhi_dev_net_client), GFP_KERNEL); + if (!mhi_net_client) + return -ENOMEM; + + mhi_net_ipc_log = ipc_log_context_create(MHI_NET_IPC_PAGES, + "mhi-net", 0); + if (mhi_net_ipc_log == NULL) + mhi_dev_net_log(MHI_DBG, + "Failed to create IPC logging for mhi_dev_net\n"); + mhi_net_ctxt.client_handle = mhi_net_client; + + /*Process pending packet work queue*/ + mhi_net_client->pending_pckt_wq = + create_singlethread_workqueue("pending_xmit_pckt_wq"); + INIT_WORK(&mhi_net_client->xmit_work, + mhi_dev_net_process_queue_packets); + + mhi_dev_net_log(MHI_INFO, + "Registering for MHI transfer events from host\n"); + mhi_net_ctxt.net_event_notifier = 
mhi_dev_net_event_notifier; + + ret_val = mhi_dev_net_init_ch_attributes(&mhi_net_ctxt); + if (ret_val < 0) { + mhi_dev_net_log(MHI_ERROR, + "Failed to init client attributes\n"); + goto channel_init_fail; + } + mhi_dev_net_log(MHI_DBG, "Initializing client\n"); + index = MHI_CLIENT_IP_SW_4_OUT; + ret_val = mhi_dev_net_rgstr_client(mhi_net_client, index); + if (ret_val) { + mhi_dev_net_log(MHI_CRITICAL, + "Failed to reg client %d ret 0\n", ret_val); + goto client_register_fail; + } + return ret_val; + +channel_init_fail: + kfree(mhi_net_client); + kfree(mhi_net_ipc_log); + return ret_val; +client_register_fail: + kfree(mhi_net_client); + kfree(mhi_net_ipc_log); + return ret_val; +} +EXPORT_SYMBOL(mhi_dev_net_interface_init); + +void __exit mhi_dev_net_exit(void) +{ + mhi_dev_net_log(MHI_INFO, + "MHI Network Interface Module exited\n"); + mhi_dev_net_close(); +} +EXPORT_SYMBOL(mhi_dev_net_exit); diff --git a/drivers/platform/msm/mhi_dev/mhi_hwio.h b/drivers/platform/msm/mhi_dev/mhi_hwio.h new file mode 100644 index 0000000000000000000000000000000000000000..936cac74a17248e561ceca828715e901906892fc --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_hwio.h @@ -0,0 +1,197 @@ +/* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _MHI_HWIO_ +#define _MHI_HWIO_ + +/* MHI register definition */ +#define MHI_CTRL_INT_STATUS_A7 (0x0004) +#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK 0xffffffff +#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT 0x0 + +#define MHI_CHDB_INT_STATUS_A7_n(n) (0x0028 + 0x4 * (n)) +#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff +#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0 + +#define MHI_ERDB_INT_STATUS_A7_n(n) (0x0038 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff +#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0 + +#define MHI_CTRL_INT_CLEAR_A7 (0x004C) +#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK 0xffffffff +#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT 0x0 +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x0070 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff +#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0 + +#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x0080 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff +#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0 + +#define MHI_CTRL_INT_MASK_A7 (0x0094) +#define MHI_CTRL_INT_MASK_A7_MASK_MASK 0x3 +#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT 0x0 +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_MHICTRL_SHFT 0 +#define MHI_CTRL_CRDB_MASK BIT(1) +#define MHI_CTRL_CRDB_SHFT 1 + +#define MHI_CHDB_INT_MASK_A7_n(n) (0x00B8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK 0xffffffff +#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT 0x0 + +#define MHI_ERDB_INT_MASK_A7_n(n) (0x00C8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK 0xffffffff +#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT 0x0 + +#define MHIREGLEN (0x0100) +#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff +#define MHIREGLEN_MHIREGLEN_SHIFT 0x0 + +#define MHIVER (0x0108) +#define MHIVER_MHIVER_MASK 0xffffffff +#define MHIVER_MHIVER_SHIFT 0x0 + +#define MHICFG (0x0110) +#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000 +#define 
MHICFG_RESERVED_BITS31_24_SHIFT 0x18 +#define MHICFG_NER_MASK 0xff0000 +#define MHICFG_NER_SHIFT 0x10 +#define MHICFG_RESERVED_BITS15_8_MASK 0xff00 +#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8 +#define MHICFG_NCH_MASK 0xff +#define MHICFG_NCH_SHIFT 0x0 + +#define CHDBOFF (0x0118) +#define CHDBOFF_CHDBOFF_MASK 0xffffffff +#define CHDBOFF_CHDBOFF_SHIFT 0x0 + +#define ERDBOFF (0x0120) +#define ERDBOFF_ERDBOFF_MASK 0xffffffff +#define ERDBOFF_ERDBOFF_SHIFT 0x0 + +#define BHIOFF (0x0128) +#define BHIOFF_BHIOFF_MASK 0xffffffff +#define BHIOFF_BHIOFF_SHIFT 0x0 + +#define DEBUGOFF (0x0130) +#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff +#define DEBUGOFF_DEBUGOFF_SHIFT 0x0 + +#define MHICTRL (0x0138) +#define MHICTRL_MHISTATE_MASK 0x0000FF00 +#define MHICTRL_MHISTATE_SHIFT 0x8 +#define MHICTRL_RESET_MASK 0x2 +#define MHICTRL_RESET_SHIFT 0x1 + +#define MHISTATUS (0x0148) +#define MHISTATUS_MHISTATE_MASK 0x0000ff00 +#define MHISTATUS_MHISTATE_SHIFT 0x8 +#define MHISTATUS_SYSERR_MASK 0x4 +#define MHISTATUS_SYSERR_SHIFT 0x2 +#define MHISTATUS_READY_MASK 0x1 +#define MHISTATUS_READY_SHIFT 0x0 + +#define CCABAP_LOWER (0x0158) +#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0 + +#define CCABAP_HIGHER (0x015C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0 + +#define ECABAP_LOWER (0x0160) +#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0 + +#define ECABAP_HIGHER (0x0164) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0 + +#define CRCBAP_LOWER (0x0168) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0 + +#define CRCBAP_HIGHER (0x016C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0 + +#define CRDB_LOWER (0x0170) +#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff +#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0 
+ +#define CRDB_HIGHER (0x0174) +#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0 + +#define MHICTRLBASE_LOWER (0x0180) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0 + +#define MHICTRLBASE_HIGHER (0x0184) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0 + +#define MHICTRLLIMIT_LOWER (0x0188) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0 + +#define MHICTRLLIMIT_HIGHER (0x018C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0 + +#define MHIDATABASE_LOWER (0x0198) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0 + +#define MHIDATABASE_HIGHER (0x019C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0 + +#define MHIDATALIMIT_LOWER (0x01A0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0 + +#define MHIDATALIMIT_HIGHER (0x01A4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0 + +#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n)) +#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff +#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0 + +#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n)) +#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff +#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0 + +#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n)) +#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff +#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0 + +#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n)) +#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff +#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0 + +#define BHI_INTVEC (0x220) 
+#define BHI_INTVEC_MASK 0xFFFFFFFF +#define BHI_INTVEC_SHIFT 0 + +#define BHI_EXECENV (0x228) +#define BHI_EXECENV_MASK 0xFFFFFFFF +#define BHI_EXECENV_SHIFT 0 + +#define BHI_IMGTXDB (0x218) + +#endif diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c new file mode 100644 index 0000000000000000000000000000000000000000..1f8ade6dd866a5552331d2fe4e8559d16c151c67 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c @@ -0,0 +1,733 @@ +/* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" +#include "mhi_hwio.h" + +int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset, + uint32_t *reg_value) +{ + void __iomem *addr; + + if (WARN_ON(!dev)) + return -EINVAL; + + addr = dev->mmio_base_addr + offset; + + *reg_value = readl_relaxed(addr); + + pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read); + +int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset, + uint32_t val) +{ + void __iomem *addr; + + if (WARN_ON(!dev)) + return -EINVAL; + + addr = dev->mmio_base_addr + offset; + + writel_relaxed(val, addr); + + pr_debug("reg write:0x%x with value 0x%x\n", offset, val); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_write); + +int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t val) +{ + uint32_t reg_val; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, offset, ®_val); + + reg_val &= ~mask; + reg_val |= ((val << shift) & mask); + + mhi_dev_mmio_write(dev, offset, reg_val); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_masked_write); + +int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset, + uint32_t mask, uint32_t shift, + uint32_t *reg_val) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, offset, reg_val); + + *reg_val &= mask; + *reg_val >>= shift; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_masked_read); + +static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev, + uint32_t chdb_id, bool enable) +{ + uint32_t chid_mask, chid_idx, chid_shft, val = 0; + + chid_shft = chdb_id%32; + chid_mask = (1 << chid_shft); + chid_idx = chdb_id/32; + + if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB) { + pr_err("Invalid channel id:%d\n", chid_idx); + return -EINVAL; + } + + if (enable) + val = 1; + + 
mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx), + chid_mask, chid_shft, val); + + mhi_dev_mmio_read(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx), + &dev->chdb[chid_idx].mask); + + return 0; +} + +int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7); + +int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7); + +static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev, + uint32_t erdb_ch_id, bool enable) +{ + uint32_t erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0; + + erdb_id_shft = erdb_ch_id%32; + erdb_id_mask = (1 << erdb_id_shft); + erdb_id_idx = erdb_ch_id/32; + + if (enable) + val = 1; + + mhi_dev_mmio_masked_write(dev, + MHI_ERDB_INT_MASK_A7_n(erdb_id_idx), + erdb_id_mask, erdb_id_shft, val); + + return 0; +} + +int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7); + +int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7); + +int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state, + bool *mhi_reset) +{ + uint32_t reg_value = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_read(dev, MHICTRL, + MHISTATUS_MHISTATE_MASK, MHISTATUS_MHISTATE_SHIFT, state); + + mhi_dev_mmio_read(dev, MHICTRL, ®_value); + + if (reg_value & MHICTRL_RESET_MASK) + *mhi_reset = true; + + pr_debug("MHICTRL is 0x%x\n", reg_value); + + 
return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state); + +static int mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable) +{ + uint32_t mask = 0, i = 0; + + if (enable) + mask = MHI_CHDB_INT_MASK_A7_n_MASK_MASK; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + mhi_dev_mmio_write(dev, + MHI_CHDB_INT_MASK_A7_n(i), mask); + dev->chdb[i].mask = mask; + } + + return 0; +} + +int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_chdb_interrupts(dev, true); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts); + +int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_chdb_interrupts(dev, false); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts); + +int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev) +{ + uint32_t i; + + if (WARN_ON(!dev)) + return -EINVAL; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_dev_mmio_read(dev, + MHI_CHDB_INT_STATUS_A7_n(i), &dev->chdb[i].status); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts); + +static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable) +{ + uint32_t mask = 0, i; + + if (enable) + mask = MHI_ERDB_INT_MASK_A7_n_MASK_MASK; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_dev_mmio_write(dev, + MHI_ERDB_INT_MASK_A7_n(i), mask); + + return 0; +} + +int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_erdb_interrupts(dev, true); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts); + +int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_set_erdb_interrupts(dev, false); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts); + +int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev) +{ + uint32_t i; + + 
if (WARN_ON(!dev)) + return -EINVAL; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i), + &dev->evdb[i].status); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts); + +int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt); + +int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt); + +int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int); + + dev->ctrl_int &= 0x1; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt); + +int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int); + + dev->cmd_int &= 0x10; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt); + +int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt); + +int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt); + +static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev) +{ + 
mhi_dev_mmio_disable_ctrl_interrupt(dev); + + mhi_dev_mmio_disable_cmdb_interrupt(dev); + + mhi_dev_mmio_mask_chdb_interrupts(dev); + + mhi_dev_mmio_mask_erdb_interrupts(dev); +} + +int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev) +{ + uint32_t i = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i), + MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK); + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i), + MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK); + + mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, + MHI_CTRL_INT_CRDB_CLEAR); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts); + +int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev) +{ + uint32_t ccabap_value = 0, offset = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, CCABAP_HIGHER, &ccabap_value); + + dev->ch_ctx_shadow.host_pa = ccabap_value; + dev->ch_ctx_shadow.host_pa <<= 32; + + mhi_dev_mmio_read(dev, CCABAP_LOWER, &ccabap_value); + + dev->ch_ctx_shadow.host_pa |= ccabap_value; + + offset = (uint32_t)(dev->ch_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base); + +int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev) +{ + uint32_t ecabap_value = 0, offset = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, ECABAP_HIGHER, &ecabap_value); + + dev->ev_ctx_shadow.host_pa = ecabap_value; + dev->ev_ctx_shadow.host_pa <<= 32; + + mhi_dev_mmio_read(dev, ECABAP_LOWER, &ecabap_value); + + dev->ev_ctx_shadow.host_pa |= ecabap_value; + + offset = (uint32_t)(dev->ev_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + 
return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base); + +int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev) +{ + uint32_t crcbap_value = 0, offset = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, CRCBAP_HIGHER, &crcbap_value); + + dev->cmd_ctx_shadow.host_pa = crcbap_value; + dev->cmd_ctx_shadow.host_pa <<= 32; + + mhi_dev_mmio_read(dev, CRCBAP_LOWER, &crcbap_value); + + dev->cmd_ctx_shadow.host_pa |= crcbap_value; + + offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa - + dev->ctrl_base.host_pa); + + dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset; + dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base); + +int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0, ch_start_idx = 0; + + if (WARN_ON(!ring)) + return -EINVAL; + + ch_start_idx = ring->mhi_dev->ch_ring_start; + + mhi_dev_mmio_read(ring->mhi_dev, + CHDB_HIGHER_n(ring->id-ch_start_idx), &value); + + *wr_offset = value; + *wr_offset <<= 32; + + mhi_dev_mmio_read(ring->mhi_dev, + CHDB_LOWER_n(ring->id-ch_start_idx), &value); + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db); + +int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0, ev_idx_start = 0; + + if (WARN_ON(!ring)) + return -EINVAL; + + ev_idx_start = ring->mhi_dev->ev_ring_start; + mhi_dev_mmio_read(ring->mhi_dev, + ERDB_HIGHER_n(ring->id - ev_idx_start), &value); + + *wr_offset = value; + *wr_offset <<= 32; + + mhi_dev_mmio_read(ring->mhi_dev, + ERDB_LOWER_n(ring->id - ev_idx_start), &value); + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db); + +int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset) +{ + uint32_t value = 0; + + if (WARN_ON(!ring)) + return -EINVAL; + + mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &value); + + *wr_offset = value; + *wr_offset <<= 32; 
+ + mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &value); + + *wr_offset |= value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db); + +int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_write(dev, BHI_EXECENV, value); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_set_env); + +int mhi_dev_mmio_reset(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_write(dev, MHICTRL, 0); + mhi_dev_mmio_write(dev, MHISTATUS, 0); + mhi_dev_mmio_clear_interrupts(dev); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_reset); + +int mhi_dev_restore_mmio(struct mhi_dev *dev) +{ + uint32_t i, reg_cntl_value; + void *reg_cntl_addr; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_mask_interrupts(dev); + + for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) { + reg_cntl_addr = dev->mmio_base_addr + (i * 4); + reg_cntl_value = dev->mmio_backup[i]; + writel_relaxed(reg_cntl_value, reg_cntl_addr); + } + + mhi_dev_mmio_clear_interrupts(dev); + mhi_dev_mmio_enable_ctrl_interrupt(dev); + + /*Enable chdb interrupt*/ + mhi_dev_mmio_enable_chdb_interrupts(dev); + + /* Mask and enable control interrupt */ + mb(); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_restore_mmio); + +int mhi_dev_backup_mmio(struct mhi_dev *dev) +{ + uint32_t i = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++) + dev->mmio_backup[i] = + readl_relaxed(dev->mmio_base_addr + (i * 4)); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_backup_mmio); + +int mhi_dev_get_mhi_addr(struct mhi_dev *dev) +{ + uint32_t data_value = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value); + dev->host_addr.ctrl_base_lsb = data_value; + + mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value); + dev->host_addr.ctrl_base_msb = data_value; + + mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value); + dev->host_addr.ctrl_limit_lsb = data_value; + + 
mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value); + dev->host_addr.ctrl_limit_msb = data_value; + + mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value); + dev->host_addr.data_base_lsb = data_value; + + mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value); + dev->host_addr.data_base_msb = data_value; + + mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value); + dev->host_addr.data_limit_lsb = data_value; + + mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value); + dev->host_addr.data_limit_msb = data_value; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_get_mhi_addr); + +int mhi_dev_mmio_init(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len); + + mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK, + MHICFG_NER_SHIFT, &dev->cfg.event_rings); + + mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset); + + mhi_dev_mmio_read(dev, ERDBOFF, &dev->cfg.erdb_offset); + + dev->cfg.channels = NUM_CHANNELS; + + if (!dev->mmio_initialized) + mhi_dev_mmio_reset(dev); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_mmio_init); + +int mhi_dev_update_ner(struct mhi_dev *dev) +{ + if (WARN_ON(!dev)) + return -EINVAL; + + mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK, + MHICFG_NER_SHIFT, &dev->cfg.event_rings); + + pr_debug("NER in HW :%d\n", dev->cfg.event_rings); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_update_ner); + +int mhi_dev_dump_mmio(struct mhi_dev *dev) +{ + uint32_t r1, r2, r3, r4, i, offset = 0; + + if (WARN_ON(!dev)) + return -EINVAL; + + for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) { + mhi_dev_mmio_read(dev, offset, &r1); + + mhi_dev_mmio_read(dev, offset+4, &r2); + + mhi_dev_mmio_read(dev, offset+8, &r3); + + mhi_dev_mmio_read(dev, offset+0xC, &r4); + + offset += 0x10; + pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + offset, r1, r2, r3, r4); + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_dump_mmio); diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c 
b/drivers/platform/msm/mhi_dev/mhi_ring.c new file mode 100644 index 0000000000000000000000000000000000000000..14d74026e69e06e928f48e62fc9f980de0c75100 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_ring.c @@ -0,0 +1,484 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mhi.h" + +static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p) +{ + uint64_t rbase; + + rbase = ring->ring_ctx->generic.rbase; + + return (p - rbase)/sizeof(union mhi_dev_ring_element_type); +} + +static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring) +{ + return ring->ring_ctx->generic.rlen/ + sizeof(union mhi_dev_ring_element_type); +} + +int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring, + uint32_t start, uint32_t end) +{ + struct mhi_addr host_addr; + + /* fetch ring elements from start->end, take care of wrap-around case */ + if (ring->mhi_dev->use_ipa) { + host_addr.host_pa = ring->ring_shadow.host_pa + + sizeof(union mhi_dev_ring_element_type) * start; + host_addr.phy_addr = ring->ring_cache_dma_handle + + (sizeof(union mhi_dev_ring_element_type) * start); + } else { + host_addr.device_va = ring->ring_shadow.device_va + + sizeof(union mhi_dev_ring_element_type) * start; + host_addr.virt_addr = &ring->ring_cache[start]; + } + host_addr.size = (end-start) * sizeof(union mhi_dev_ring_element_type); + if (start < end) 
{ + mhi_dev_read_from_host(ring->mhi_dev, &host_addr); + } else if (start > end) { + /* copy from 'start' to ring end, then ring start to 'end'*/ + host_addr.size = (ring->ring_size-start) * + sizeof(union mhi_dev_ring_element_type); + mhi_dev_read_from_host(ring->mhi_dev, &host_addr); + if (end) { + /* wrapped around */ + host_addr.device_pa = ring->ring_shadow.device_pa; + host_addr.device_va = ring->ring_shadow.device_va; + host_addr.host_pa = ring->ring_shadow.host_pa; + host_addr.virt_addr = &ring->ring_cache[0]; + host_addr.phy_addr = ring->ring_cache_dma_handle; + host_addr.size = (end * + sizeof(union mhi_dev_ring_element_type)); + mhi_dev_read_from_host(ring->mhi_dev, &host_addr); + } + } + return 0; +} + +int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset) +{ + uint32_t old_offset = 0; + struct mhi_dev *mhi_ctx; + + if (WARN_ON(!ring)) + return -EINVAL; + + mhi_ctx = ring->mhi_dev; + + if (ring->wr_offset == wr_offset) { + mhi_log(MHI_MSG_VERBOSE, + "nothing to cache for ring %d, local wr_ofst %d\n", + ring->id, ring->wr_offset); + mhi_log(MHI_MSG_VERBOSE, + "new wr_offset %d\n", wr_offset); + return 0; + } + + old_offset = ring->wr_offset; + + /* + * copy the elements starting from old_offset to wr_offset + * take in to account wrap around case event rings are not + * cached, not required + */ + if (ring->id >= mhi_ctx->ev_ring_start && + ring->id < (mhi_ctx->ev_ring_start + + mhi_ctx->cfg.event_rings)) { + mhi_log(MHI_MSG_VERBOSE, + "not caching event ring %d\n", ring->id); + return 0; + } + + mhi_log(MHI_MSG_VERBOSE, "caching ring %d, start %d, end %d\n", + ring->id, old_offset, wr_offset); + + if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) { + mhi_log(MHI_MSG_ERROR, + "failed to fetch elements for ring %d, start %d, end %d\n", + ring->id, old_offset, wr_offset); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_cache_ring); + +int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring) +{ + uint64_t wr_offset = 
0; + uint32_t new_wr_offset = 0; + int32_t rc = 0; + + if (WARN_ON(!ring)) + return -EINVAL; + + switch (ring->type) { + case RING_TYPE_CMD: + rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset); + if (rc) { + pr_err("%s: CMD DB read failed\n", __func__); + return rc; + } + mhi_log(MHI_MSG_VERBOSE, + "ring %d wr_offset from db 0x%x\n", + ring->id, (uint32_t) wr_offset); + break; + case RING_TYPE_ER: + rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset); + if (rc) { + pr_err("%s: EVT DB read failed\n", __func__); + return rc; + } + break; + case RING_TYPE_CH: + rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset); + if (rc) { + pr_err("%s: CH DB read failed\n", __func__); + return rc; + } + mhi_log(MHI_MSG_VERBOSE, + "ring %d wr_offset from db 0x%x\n", + ring->id, (uint32_t) wr_offset); + break; + default: + mhi_log(MHI_MSG_ERROR, "invalid ring type\n"); + return -EINVAL; + } + + new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset); + + mhi_dev_cache_ring(ring, new_wr_offset); + + ring->wr_offset = new_wr_offset; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_update_wr_offset); + +int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset) +{ + union mhi_dev_ring_element_type *el; + + if (WARN_ON(!ring)) + return -EINVAL; + + /* get the element and invoke the respective callback */ + el = &ring->ring_cache[offset]; + + mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr); + mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, offset:%d\n", + el->tre.len, offset); + + if (ring->ring_cb) + ring->ring_cb(ring->mhi_dev, el, (void *)ring); + else + mhi_log(MHI_MSG_ERROR, "No callback registered for ring %d\n", + ring->id); + + return 0; +} +EXPORT_SYMBOL(mhi_dev_process_ring_element); + +int mhi_dev_process_ring(struct mhi_dev_ring *ring) +{ + int rc = 0; + union mhi_dev_ring_element_type *el; + + if (WARN_ON(!ring)) + return -EINVAL; + + mhi_log(MHI_MSG_VERBOSE, + "Before wr update ring_id (%d) element (%d) with wr:%d\n", + ring->id, ring->rd_offset, 
ring->wr_offset); + + rc = mhi_dev_update_wr_offset(ring); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error updating write-offset for ring %d\n", + ring->id); + return rc; + } + + /* get the element and invoke the respective callback */ + el = &ring->ring_cache[ring->wr_offset]; + + mhi_log(MHI_MSG_VERBOSE, "evnt ptr : 0x%llx\n", el->tre.data_buf_ptr); + mhi_log(MHI_MSG_VERBOSE, "evnt len : 0x%x, wr_offset:%d\n", + el->tre.len, ring->wr_offset); + + if (ring->type == RING_TYPE_CH) { + /* notify the clients that there are elements in the ring */ + rc = mhi_dev_process_ring_element(ring, ring->rd_offset); + if (rc) + pr_err("Error fetching elements\n"); + return rc; + } + mhi_log(MHI_MSG_VERBOSE, + "After ring update ring_id (%d) element (%d) with wr:%d\n", + ring->id, ring->rd_offset, ring->wr_offset); + + while (ring->rd_offset != ring->wr_offset) { + rc = mhi_dev_process_ring_element(ring, ring->rd_offset); + if (rc) { + mhi_log(MHI_MSG_ERROR, + "Error processing ring (%d) element (%d)\n", + ring->id, ring->rd_offset); + return rc; + } + + mhi_log(MHI_MSG_VERBOSE, + "Processing ring (%d) rd_offset:%d, wr_offset:%d\n", + ring->id, ring->rd_offset, ring->wr_offset); + + mhi_dev_ring_inc_index(ring, ring->rd_offset); + } + + if (!(ring->rd_offset == ring->wr_offset)) { + mhi_log(MHI_MSG_ERROR, + "Error with the rd offset/wr offset\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mhi_dev_process_ring); + +int mhi_dev_add_element(struct mhi_dev_ring *ring, + union mhi_dev_ring_element_type *element, + struct event_req *ereq, int size) +{ + uint32_t old_offset = 0; + struct mhi_addr host_addr; + uint32_t num_elem = 1; + uint32_t num_free_elem; + + if (WARN_ON(!ring || !element)) + return -EINVAL; + + mhi_dev_update_wr_offset(ring); + + if (ereq) + num_elem = size / (sizeof(union mhi_dev_ring_element_type)); + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = ring->wr_offset - ring->rd_offset - 1; + else + num_free_elem = ring->ring_size - ring->rd_offset 
+ + ring->wr_offset - 1; + + if (num_free_elem < num_elem) { + mhi_log(MHI_MSG_ERROR, "No space to add %d elem in ring (%d)\n", + num_elem, ring->id); + return -EINVAL; + } + + old_offset = ring->rd_offset; + + if (ereq) { + ring->rd_offset += num_elem; + if (ring->rd_offset >= ring->ring_size) + ring->rd_offset -= ring->ring_size; + } else + mhi_dev_ring_inc_index(ring, ring->rd_offset); + + ring->ring_ctx->generic.rp = (ring->rd_offset * + sizeof(union mhi_dev_ring_element_type)) + + ring->ring_ctx->generic.rbase; + /* + * Write the element, ring_base has to be the + * iomap of the ring_base for memcpy + */ + + if (ring->mhi_dev->use_ipa) + host_addr.host_pa = ring->ring_shadow.host_pa + + sizeof(union mhi_dev_ring_element_type) * old_offset; + else + host_addr.device_va = ring->ring_shadow.device_va + + sizeof(union mhi_dev_ring_element_type) * old_offset; + + if (!ereq) { + /* We're adding only a single ring element */ + host_addr.virt_addr = element; + host_addr.size = sizeof(union mhi_dev_ring_element_type); + + mhi_log(MHI_MSG_VERBOSE, "adding element to ring (%d)\n", + ring->id); + mhi_log(MHI_MSG_VERBOSE, "rd_ofset %d\n", ring->rd_offset); + mhi_log(MHI_MSG_VERBOSE, "type %d\n", element->generic.type); + + mhi_dev_write_to_host(ring->mhi_dev, &host_addr, + NULL, MHI_DEV_DMA_SYNC); + return 0; + } + + /* Adding multiple ring elements */ + if (ring->rd_offset == 0 || (ring->rd_offset > old_offset)) { + /* No wrap-around case */ + host_addr.virt_addr = element; + host_addr.size = size; + mhi_dev_write_to_host(ring->mhi_dev, &host_addr, + ereq, MHI_DEV_DMA_ASYNC); + } else { + /* Wrap-around case - first chunk uses dma sync */ + host_addr.virt_addr = element; + host_addr.size = (ring->ring_size - old_offset) * + sizeof(union mhi_dev_ring_element_type); + mhi_dev_write_to_host(ring->mhi_dev, &host_addr, + NULL, MHI_DEV_DMA_SYNC); + + /* Copy remaining elements */ + if (ring->mhi_dev->use_ipa) + host_addr.host_pa = ring->ring_shadow.host_pa; + else + 
host_addr.device_va = ring->ring_shadow.device_va; + host_addr.virt_addr = element + (ring->ring_size - old_offset); + host_addr.size = ring->rd_offset * + sizeof(union mhi_dev_ring_element_type); + mhi_dev_write_to_host(ring->mhi_dev, &host_addr, + ereq, MHI_DEV_DMA_ASYNC); + } + return 0; +} +EXPORT_SYMBOL(mhi_dev_add_element); + +int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx, + struct mhi_dev *mhi) +{ + int rc = 0; + uint32_t wr_offset = 0; + uint32_t offset = 0; + + if (WARN_ON(!ring || !ctx || !mhi)) + return -EINVAL; + + ring->ring_ctx = ctx; + ring->ring_size = mhi_dev_ring_num_elems(ring); + ring->rd_offset = mhi_dev_ring_addr2ofst(ring, + ring->ring_ctx->generic.rp); + ring->wr_offset = mhi_dev_ring_addr2ofst(ring, + ring->ring_ctx->generic.rp); + ring->mhi_dev = mhi; + + mhi_ring_set_state(ring, RING_STATE_IDLE); + + wr_offset = mhi_dev_ring_addr2ofst(ring, + ring->ring_ctx->generic.wp); + + ring->ring_cache = dma_alloc_coherent(mhi->dev, + ring->ring_size * + sizeof(union mhi_dev_ring_element_type), + &ring->ring_cache_dma_handle, + GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + offset = (uint32_t)(ring->ring_ctx->generic.rbase - + mhi->ctrl_base.host_pa); + + ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset; + ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset; + ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset; + + if (ring->type == RING_TYPE_ER) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va + + (ring->id - mhi->ev_ring_start) * + sizeof(union mhi_dev_ring_ctx)); + else if (ring->type == RING_TYPE_CMD) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va; + else if (ring->type == RING_TYPE_CH) + ring->ring_ctx_shadow = + (union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va + + (ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx)); + + ring->ring_ctx_shadow = ring->ring_ctx; + + if 
(ring->type != RING_TYPE_ER || ring->type != RING_TYPE_CH) { + rc = mhi_dev_cache_ring(ring, wr_offset); + if (rc) + return rc; + } + + mhi_log(MHI_MSG_VERBOSE, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n", + (uint32_t)ring->ring_ctx->generic.rbase, + (uint32_t)ring->ring_ctx->generic.rp, + (uint32_t)ring->ring_ctx->generic.wp); + ring->wr_offset = wr_offset; + + return rc; +} +EXPORT_SYMBOL(mhi_ring_start); + +void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type, + int id) +{ + if (WARN_ON(!ring)) + return; + + ring->id = id; + ring->state = RING_STATE_UINT; + ring->ring_cb = NULL; + ring->type = type; +} +EXPORT_SYMBOL(mhi_ring_init); + +void mhi_ring_set_cb(struct mhi_dev_ring *ring, + void (*ring_cb)(struct mhi_dev *dev, + union mhi_dev_ring_element_type *el, void *ctx)) +{ + if (WARN_ON(!ring || !ring_cb)) + return; + + ring->ring_cb = ring_cb; +} +EXPORT_SYMBOL(mhi_ring_set_cb); + +void mhi_ring_set_state(struct mhi_dev_ring *ring, + enum mhi_dev_ring_state state) +{ + if (WARN_ON(!ring)) + return; + + if (state > RING_STATE_PENDING) { + pr_err("%s: Invalid ring state\n", __func__); + return; + } + + ring->state = state; +} +EXPORT_SYMBOL(mhi_ring_set_state); + +enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring) +{ + if (WARN_ON(!ring)) + return -EINVAL; + + return ring->state; +} +EXPORT_SYMBOL(mhi_ring_get_state); diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c new file mode 100644 index 0000000000000000000000000000000000000000..e6e8ef132509193f493e4193d2638a740dfa7dba --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_sm.c @@ -0,0 +1,1384 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
/* EP-PCIe D-states tracked by the MHI state machine. */
enum mhi_sm_ep_pcie_state {
	MHI_SM_EP_PCIE_LINK_DISABLE,
	MHI_SM_EP_PCIE_D0_STATE,
	MHI_SM_EP_PCIE_D3_HOT_STATE,
	MHI_SM_EP_PCIE_D3_COLD_STATE,
};

/*
 * mhi_sm_dstate_str() - human-readable name of an EP-PCIe D-state.
 * @state:	D-state to name
 *
 * Used only for log messages; unknown values map to "INVALID D-STATE".
 */
static inline const char *mhi_sm_dstate_str(enum mhi_sm_ep_pcie_state state)
{
	switch (state) {
	case MHI_SM_EP_PCIE_LINK_DISABLE:
		return "EP_PCIE_LINK_DISABLE";
	case MHI_SM_EP_PCIE_D0_STATE:
		return "D0_STATE";
	case MHI_SM_EP_PCIE_D3_HOT_STATE:
		return "D3_HOT_STATE";
	case MHI_SM_EP_PCIE_D3_COLD_STATE:
		return "D3_COLD_STATE";
	default:
		return "INVALID D-STATE";
	}
}
number of MHI_DEV_EVENT_M3_STATE events + * @hw_acc_wakeup_event_cnt: total number of MHI_DEV_EVENT_HW_ACC_WAKEUP events + * @mhi_core_wakeup_event_cnt: total number of MHI_DEV_EVENT_CORE_WAKEUP events + * @linkup_event_cnt: total number of EP_PCIE_EVENT_LINKUP events + * @rst_deast_event_cnt: total number of EP_PCIE_EVENT_PM_RST_DEAST events + * @d3_hot_event_cnt: total number of EP_PCIE_EVENT_PM_D3_HOT events + * @d3_cold_event_cnt: total number of EP_PCIE_EVENT_PM_D3_COLD events + * @d0_event_cnt: total number of EP_PCIE_EVENT_PM_D0 events + * @linkdown_event_cnt: total number of EP_PCIE_EVENT_LINKDOWN events + */ +struct mhi_sm_stats { + int m0_event_cnt; + int m3_event_cnt; + int hw_acc_wakeup_event_cnt; + int mhi_core_wakeup_event_cnt; + int linkup_event_cnt; + int rst_deast_event_cnt; + int d3_hot_event_cnt; + int d3_cold_event_cnt; + int d0_event_cnt; + int linkdown_event_cnt; +}; + +/** + * struct mhi_sm_dev - MHI state manager context information + * @mhi_state: MHI M state of the MHI device + * @d_state: EP-PCIe D state of the MHI device + * @mhi_dev: MHI device struct pointer + * @mhi_state_lock: mutex for mhi_state + * @syserr_occurred:flag to indicate if a syserr condition has occurred. 
+ * @mhi_sm_wq: workqueue for state change events + * @pending_device_events: number of pending mhi state change events in sm_wq + * @pending_pcie_events: number of pending mhi state change events in sm_wq + * @stats: stats on the handled and pending events + */ +struct mhi_sm_dev { + enum mhi_dev_state mhi_state; + enum mhi_sm_ep_pcie_state d_state; + struct mhi_dev *mhi_dev; + struct mutex mhi_state_lock; + bool syserr_occurred; + struct workqueue_struct *mhi_sm_wq; + atomic_t pending_device_events; + atomic_t pending_pcie_events; + struct mhi_sm_stats stats; +}; +static struct mhi_sm_dev *mhi_sm_ctx; + + +#ifdef CONFIG_DEBUG_FS +#define MHI_SM_MAX_MSG_LEN 1024 +static char dbg_buff[MHI_SM_MAX_MSG_LEN]; +static struct dentry *dent; +static struct dentry *dfile_stats; + +static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos); +static ssize_t mhi_sm_debugfs_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos); + +const struct file_operations mhi_sm_stats_ops = { + .read = mhi_sm_debugfs_read, + .write = mhi_sm_debugfs_write, +}; + +static void mhi_sm_debugfs_init(void) +{ + const mode_t read_write_mode = 0666; + + dent = debugfs_create_dir("mhi_sm", 0); + if (IS_ERR(dent)) { + MHI_SM_ERR("fail to create folder mhi_sm\n"); + return; + } + + dfile_stats = + debugfs_create_file("stats", read_write_mode, dent, + 0, &mhi_sm_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + MHI_SM_ERR("fail to create file stats\n"); + debugfs_remove_recursive(dent); + } +} + +static void mhi_sm_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} +#else +static inline void mhi_sm_debugfs_init(void) {} +static inline void mhi_sm_debugfs_destroy(void) {} +#endif /*CONFIG_DEBUG_FS*/ + + +static void mhi_sm_mmio_set_mhistatus(enum mhi_dev_state state) +{ + struct mhi_dev *dev = mhi_sm_ctx->mhi_dev; + + MHI_SM_FUNC_ENTRY(); + + switch (state) { + case MHI_DEV_READY_STATE: + MHI_SM_DBG("set MHISTATUS 
to READY mode\n"); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, 1); + + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + case MHI_DEV_SYSERR_STATE: + MHI_SM_DBG("set MHISTATUS to SYSTEM ERROR mode\n"); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_SYSERR_MASK, + MHISTATUS_SYSERR_SHIFT, 1); + + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + case MHI_DEV_M1_STATE: + case MHI_DEV_M2_STATE: + MHI_SM_ERR("Not supported state, can't set MHISTATUS to %s\n", + mhi_sm_mstate_str(state)); + goto exit; + case MHI_DEV_M0_STATE: + case MHI_DEV_M3_STATE: + MHI_SM_DBG("set MHISTATUS.MHISTATE to %s state\n", + mhi_sm_mstate_str(state)); + mhi_dev_mmio_masked_write(dev, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, state); + break; + default: + MHI_SM_ERR("Invalid mhi state: 0x%x state", state); + goto exit; + } + + mhi_sm_ctx->mhi_state = state; + +exit: + MHI_SM_FUNC_EXIT(); +} + +/** + * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid + * @curr_state: current MHI state + * @event: MHI state change event + * + * Determine according to MHI state management if the state change event + * is valid on the current mhi state. + * Note: The decision doesn't take into account M1 and M2 states. 
+ * + * Return: true: transition is valid + * false: transition is not valid + */ +static bool mhi_sm_is_legal_event_on_state(enum mhi_dev_state curr_state, + enum mhi_dev_event event) +{ + bool res; + + switch (event) { + case MHI_DEV_EVENT_M0_STATE: + res = (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D0_STATE && + curr_state != MHI_DEV_RESET_STATE); + break; + case MHI_DEV_EVENT_M3_STATE: + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + case MHI_DEV_EVENT_CORE_WAKEUP: + res = (curr_state == MHI_DEV_M3_STATE || + curr_state == MHI_DEV_M0_STATE); + break; + default: + MHI_SM_ERR("Received invalid event: %s\n", + mhi_sm_dev_event_str(event)); + res = false; + break; + } + + return res; +} + +/** + * mhi_sm_is_legal_pcie_event_on_state() - Determine if EP-PCIe linke state + * transition is valid on the current system state. + * @curr_mstate: current MHI state + * @curr_dstate: current ep-pcie link, d, state + * @event: ep-pcie link state change event + * + * Return: true: transition is valid + * false: transition is not valid + */ +static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate, + enum mhi_sm_ep_pcie_state curr_dstate, enum ep_pcie_event event) +{ + bool res; + + switch (event) { + case EP_PCIE_EVENT_LINKUP: + case EP_PCIE_EVENT_LINKDOWN: + res = true; + break; + case EP_PCIE_EVENT_PM_D3_HOT: + res = ((curr_mstate == MHI_DEV_M3_STATE || + curr_mstate == MHI_DEV_READY_STATE || + curr_mstate == MHI_DEV_RESET_STATE) && + curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE); + break; + case EP_PCIE_EVENT_PM_D3_COLD: + res = (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE || + curr_dstate == MHI_SM_EP_PCIE_D0_STATE); + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE || + curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE); + break; + case EP_PCIE_EVENT_PM_D0: + res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE || + curr_dstate == 
MHI_SM_EP_PCIE_D3_HOT_STATE); + break; + case EP_PCIE_EVENT_MHI_A7: + res = true; + break; + default: + MHI_SM_ERR("Invalid ep_pcie event, received: %s\n", + mhi_sm_pcie_event_str(event)); + res = false; + break; + } + + return res; +} + +/** + * mhi_sm_change_to_M0() - switch to M0 state. + * + * Switch MHI-device state to M0, if possible according to MHI state machine. + * Notify the MHI-host on the transition, in case MHI is suspended- resume MHI. + * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_change_to_M0(void) +{ + enum mhi_dev_state old_state; + struct ep_pcie_msi_config cfg; + int res = -EINVAL; + + MHI_SM_FUNC_ENTRY(); + + old_state = mhi_sm_ctx->mhi_state; + + if (old_state == MHI_DEV_M0_STATE) { + MHI_SM_DBG("Nothing to do, already in M0 state\n"); + res = 0; + goto exit; + } else if (old_state == MHI_DEV_M3_STATE || + old_state == MHI_DEV_READY_STATE) { + /* Retrieve MHI configuration*/ + res = mhi_dev_config_outbound_iatu(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Fail to configure iATU, returned %d\n", + res); + goto exit; + } + res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle, + &cfg); + if (res) { + MHI_SM_ERR("Error retrieving pcie msi logic\n"); + goto exit; + } + res = mhi_pcie_config_db_routing(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Error configuring db routing\n"); + goto exit; + + } + } else { + MHI_SM_ERR("unexpected old_state: %s\n", + mhi_sm_mstate_str(old_state)); + goto exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_M0_STATE); + + /* Tell the host, device move to M0 */ + if (old_state == MHI_DEV_M3_STATE) { + if (mhi_sm_ctx->mhi_dev->use_ipa) { + res = ipa_dma_enable(); + if (res) { + MHI_SM_ERR("IPA enable failed\n"); + return res; + } + } + + res = mhi_dev_resume(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Failed resuming mhi core, returned %d", + res); + goto exit; + } + + res = ipa_mhi_resume(); + if (res) { + MHI_SM_ERR("Failed resuming ipa_mhi, returned %d", + res); + goto exit; 
+ } + } + + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_M0_STATE); + if (res) { + MHI_SM_ERR("Failed to send event %s to host, returned %d\n", + mhi_sm_dev_event_str(MHI_DEV_EVENT_M0_STATE), res); + goto exit; + } + + if (old_state == MHI_DEV_READY_STATE) { + /* Tell the host the EE */ + res = mhi_dev_send_ee_event(mhi_sm_ctx->mhi_dev, 2); + if (res) { + MHI_SM_ERR("failed sending EE event to host\n"); + goto exit; + } + } + res = 0; + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_change_to_M3() - switch to M3 state + * + * Switch MHI-device state to M3, if possible according to MHI state machine. + * Suspend MHI traffic and notify the host on the transition. + * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_change_to_M3(void) +{ + enum mhi_dev_state old_state; + int res = 0; + + MHI_SM_FUNC_ENTRY(); + + old_state = mhi_sm_ctx->mhi_state; + if (old_state == MHI_DEV_M3_STATE) { + MHI_SM_DBG("Nothing to do, already in M3 state\n"); + res = 0; + goto exit; + } + /* Suspending MHI operation*/ + res = mhi_dev_suspend(mhi_sm_ctx->mhi_dev); + if (res) { + MHI_SM_ERR("Failed to suspend mhi_core, returned %d\n", res); + goto exit; + } + res = ipa_mhi_suspend(true); + if (res) { + MHI_SM_ERR("Failed to suspend ipa_mhi, returned %d\n", res); + goto exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_M3_STATE); + + /* tell the host, device move to M3 */ + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_M3_STATE); + if (res) { + MHI_SM_ERR("Failed sendind event: %s to mhi_host\n", + mhi_sm_dev_event_str(MHI_DEV_EVENT_M3_STATE)); + goto exit; + } + + if (mhi_sm_ctx->mhi_dev->use_ipa) { + res = ipa_dma_disable(); + if (res) { + MHI_SM_ERR("IPA disable failed\n"); + return res; + } + } + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_wakeup_host() - wakeup MHI-host + *@event: MHI state chenge event + * + * Sends wekup event to MHI-host via EP-PCIe, in case MHI is in M3 state. 
+ * + * Return: 0:success + * negative: failure + */ +static int mhi_sm_wakeup_host(enum mhi_dev_event event) +{ + int res = 0; + + MHI_SM_FUNC_ENTRY(); + + if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) { + /* + * ep_pcie driver is responsible to send the right wakeup + * event, assert WAKE#, according to Link state + */ + res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle); + if (res) { + MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n", + res); + goto exit; + } + } else { + MHI_SM_DBG("Nothing to do, Host is already awake\n"); + } + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_handle_syserr() - switch to system error state. + * + * Called on system error condition. + * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device. + * Synchronic function. + * + * Return: 0: success + * negative: failure + */ +static int mhi_sm_handle_syserr(void) +{ + int res; + enum ep_pcie_link_status link_status; + bool link_enabled = false; + + MHI_SM_FUNC_ENTRY(); + + MHI_SM_ERR("Start handling SYSERR, MHI state: %s and %s", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + if (mhi_sm_ctx->mhi_state == MHI_DEV_SYSERR_STATE) { + MHI_SM_DBG("Nothing to do, already in SYSERR state\n"); + return 0; + } + + mhi_sm_ctx->syserr_occurred = true; + link_status = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle); + if (link_status == EP_PCIE_LINK_DISABLED) { + /* try to power on ep-pcie, restore mmio, and wakup host */ + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_POWER_ON); + if (res) { + MHI_SM_ERR("Failed to power on ep-pcie, returned %d\n", + res); + goto exit; + } + mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev); + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_AST_WAKE | EP_PCIE_OPT_ENUM); + if (res) { + MHI_SM_ERR("Failed to wakup host and enable ep-pcie\n"); + goto exit; + } + } + + link_enabled = true; + 
mhi_sm_mmio_set_mhistatus(MHI_DEV_SYSERR_STATE); + + /* Tell the host, device move to SYSERR state */ + res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev, + MHI_DEV_SYSERR_STATE); + if (res) { + MHI_SM_ERR("Failed to send %s state change event to host\n", + mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE)); + goto exit; + } + +exit: + if (!link_enabled) + MHI_SM_ERR("EP-PCIE Link is disable cannot set MMIO to %s\n", + mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE)); + + MHI_SM_ERR("/n/n/nError ON DEVICE !!!!/n/n/n"); + WARN_ON(1); + + MHI_SM_FUNC_EXIT(); + return res; +} + +/** + * mhi_sm_dev_event_manager() - performs MHI state change + * @work: work_struct used by the work queue + * + * This function is called from mhi_sm_wq, and performs mhi state change + * if possible according to MHI state machine + */ +static void mhi_sm_dev_event_manager(struct work_struct *work) +{ + int res; + struct mhi_sm_device_event *chg_event = container_of(work, + struct mhi_sm_device_event, work); + + MHI_SM_FUNC_ENTRY(); + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + MHI_SM_DBG("Start handling %s event, current states: %s & %s\n", + mhi_sm_dev_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + if (mhi_sm_ctx->syserr_occurred) { + MHI_SM_DBG("syserr occurred, Ignoring %s\n", + mhi_sm_dev_event_str(chg_event->event)); + goto unlock_and_exit; + } + + if (!mhi_sm_is_legal_event_on_state(mhi_sm_ctx->mhi_state, + chg_event->event)) { + MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n", + mhi_sm_dev_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + } + + switch (chg_event->event) { + case MHI_DEV_EVENT_M0_STATE: + res = mhi_sm_change_to_M0(); + if (res) + MHI_SM_ERR("Failed switching to M0 state\n"); + break; + case 
MHI_DEV_EVENT_M3_STATE: + res = mhi_sm_change_to_M3(); + if (res) + MHI_SM_ERR("Failed switching to M3 state\n"); + break; + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + case MHI_DEV_EVENT_CORE_WAKEUP: + res = mhi_sm_wakeup_host(chg_event->event); + if (res) + MHI_SM_ERR("Failed to wakeup MHI host\n"); + break; + case MHI_DEV_EVENT_CTRL_TRIG: + case MHI_DEV_EVENT_M1_STATE: + case MHI_DEV_EVENT_M2_STATE: + MHI_SM_ERR("Error: %s event is not supported\n", + mhi_sm_dev_event_str(chg_event->event)); + break; + default: + MHI_SM_ERR("Error: Invalid event, 0x%x", chg_event->event); + break; + } +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + atomic_dec(&mhi_sm_ctx->pending_device_events); + kfree(chg_event); + + MHI_SM_FUNC_EXIT(); +} + +/** + * mhi_sm_pcie_event_manager() - performs EP-PCIe linke state change + * @work: work_struct used by the work queue + * + * This function is called from mhi_sm_wq, and performs ep-pcie link state + * change if possible according to current system state and MHI state machine + */ +static void mhi_sm_pcie_event_manager(struct work_struct *work) +{ + int res; + enum mhi_sm_ep_pcie_state old_dstate; + struct mhi_sm_ep_pcie_event *chg_event = container_of(work, + struct mhi_sm_ep_pcie_event, work); + enum ep_pcie_event pcie_event = chg_event->event; + unsigned long flags; + + MHI_SM_FUNC_ENTRY(); + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + old_dstate = mhi_sm_ctx->d_state; + + MHI_SM_DBG("Start handling %s event, current MHI state %s and %s\n", + mhi_sm_pcie_event_str(chg_event->event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(old_dstate)); + + if (mhi_sm_ctx->syserr_occurred && + pcie_event != EP_PCIE_EVENT_LINKDOWN) { + MHI_SM_DBG("SYSERR occurred. 
Ignoring %s", + mhi_sm_pcie_event_str(pcie_event)); + goto unlock_and_exit; + } + + if (!mhi_sm_is_legal_pcie_event_on_state(mhi_sm_ctx->mhi_state, + old_dstate, pcie_event)) { + MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n", + mhi_sm_pcie_event_str(pcie_event), + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state), + mhi_sm_dstate_str(old_dstate)); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + } + + switch (pcie_event) { + case EP_PCIE_EVENT_LINKUP: + if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_LINK_DISABLE) + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + case EP_PCIE_EVENT_LINKDOWN: + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("Failed switching to SYSERR state\n"); + goto unlock_and_exit; + case EP_PCIE_EVENT_PM_D3_HOT: + if (old_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE) { + MHI_SM_DBG("cannot move to D3_HOT from D3_COLD\n"); + break; + } + /* Backup MMIO is done on the callback function*/ + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_HOT_STATE; + break; + case EP_PCIE_EVENT_PM_D3_COLD: + if (old_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE) { + MHI_SM_DBG("Nothing to do, already in D3_COLD state\n"); + break; + } + ep_pcie_disable_endpoint(mhi_sm_ctx->mhi_dev->phandle); + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_COLD_STATE; + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) { + MHI_SM_DBG("Nothing to do, already in D0 state\n"); + break; + } + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_POWER_ON); + if (res) { + MHI_SM_ERR("Failed to power on ep_pcie, returned %d\n", + res); + goto unlock_and_exit; + } + + mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev); + + spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags); + if ((mhi_sm_ctx->mhi_dev->mhi_int) && + (!mhi_sm_ctx->mhi_dev->mhi_int_en)) { + enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq); + mhi_sm_ctx->mhi_dev->mhi_int_en = true; + MHI_SM_DBG("Enable MHI IRQ during PCIe 
DEAST"); + } + spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags); + + res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_OPT_ENUM); + if (res) { + MHI_SM_ERR("ep-pcie failed to link train, return %d\n", + res); + goto unlock_and_exit; + } + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + case EP_PCIE_EVENT_PM_D0: + if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) { + MHI_SM_DBG("Nothing to do, already in D0 state\n"); + break; + } + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + break; + default: + MHI_SM_ERR("Invalid EP_PCIE event, received 0x%x\n", + pcie_event); + break; + } + +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + atomic_dec(&mhi_sm_ctx->pending_pcie_events); + kfree(chg_event); + + MHI_SM_FUNC_EXIT(); +} + +/** + * mhi_dev_sm_init() - Initialize MHI state machine. + * @mhi_dev: pointer to mhi device instance + * + * Assuming MHISTATUS register is in RESET state. + * + * Return: 0 success + * -EINVAL: invalid param + * -ENOMEM: allocating memory error + */ +int mhi_dev_sm_init(struct mhi_dev *mhi_dev) +{ + int res; + enum ep_pcie_link_status link_state; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_dev) { + MHI_SM_ERR("Fail: Null argument\n"); + return -EINVAL; + } + + mhi_sm_ctx = devm_kzalloc(mhi_dev->dev, sizeof(*mhi_sm_ctx), + GFP_KERNEL); + if (!mhi_sm_ctx) + return -ENOMEM; + + /*init debugfs*/ + mhi_sm_debugfs_init(); + mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq"); + if (!mhi_sm_ctx->mhi_sm_wq) { + MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n"); + res = -ENOMEM; + goto fail_init_wq; + } + + mutex_init(&mhi_sm_ctx->mhi_state_lock); + mhi_sm_ctx->mhi_dev = mhi_dev; + mhi_sm_ctx->mhi_state = MHI_DEV_RESET_STATE; + mhi_sm_ctx->syserr_occurred = false; + atomic_set(&mhi_sm_ctx->pending_device_events, 0); + atomic_set(&mhi_sm_ctx->pending_pcie_events, 0); + + link_state = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle); + if (link_state == EP_PCIE_LINK_ENABLED) + 
mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + else + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_LINK_DISABLE; + + MHI_SM_FUNC_EXIT(); + return 0; + +fail_init_wq: + mhi_sm_ctx = NULL; + mhi_sm_debugfs_destroy(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_init); + +int mhi_dev_sm_exit(struct mhi_dev *mhi_dev) +{ + MHI_SM_FUNC_ENTRY(); + + atomic_set(&mhi_sm_ctx->pending_device_events, 0); + atomic_set(&mhi_sm_ctx->pending_pcie_events, 0); + mhi_sm_debugfs_destroy(); + flush_workqueue(mhi_sm_ctx->mhi_sm_wq); + destroy_workqueue(mhi_sm_ctx->mhi_sm_wq); + ipa_dma_destroy(); + mutex_destroy(&mhi_sm_ctx->mhi_state_lock); + devm_kfree(mhi_dev->dev, mhi_sm_ctx); + mhi_sm_ctx = NULL; + + return 0; +} +EXPORT_SYMBOL(mhi_dev_sm_exit); + +/** + * mhi_dev_sm_get_mhi_state() -Get current MHI state. + * @state: return param + * + * Returns the current MHI state of the state machine. + * + * Return: 0 success + * -EINVAL: invalid param + * -EFAULT: state machine isn't initialized + */ +int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state) +{ + MHI_SM_FUNC_ENTRY(); + + if (!state) { + MHI_SM_ERR("Fail: Null argument\n"); + return -EINVAL; + } + if (!mhi_sm_ctx) { + MHI_SM_ERR("Fail: MHI SM is not initialized\n"); + return -EFAULT; + } + *state = mhi_sm_ctx->mhi_state; + MHI_SM_DBG("state machine states are: %s and %s\n", + mhi_sm_mstate_str(*state), + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + + MHI_SM_FUNC_EXIT(); + return 0; +} +EXPORT_SYMBOL(mhi_dev_sm_get_mhi_state); + +/** + * mhi_dev_sm_set_ready() -Set MHI state to ready. + * + * Set MHISTATUS register in mmio to READY. + * Synchronic function. + * + * Return: 0: success + * EINVAL: mhi state manager is not initialized + * EPERM: Operation not permitted as EP PCIE link is desable. 
+ * EFAULT: MHI state is not RESET + * negative: other failure + */ +int mhi_dev_sm_set_ready(void) +{ + int res = 0; + int is_ready; + enum mhi_dev_state state; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM isn't initialized\n"); + return -EINVAL; + } + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + if (mhi_sm_ctx->mhi_state != MHI_DEV_RESET_STATE) { + MHI_SM_ERR("Can not switch to READY state from %s state\n", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state)); + res = -EFAULT; + goto unlock_and_exit; + } + + if (mhi_sm_ctx->d_state != MHI_SM_EP_PCIE_D0_STATE) { + if (ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle) == + EP_PCIE_LINK_ENABLED) { + mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE; + } else { + MHI_SM_ERR("ERROR: ep-pcie link is not enabled\n"); + res = -EPERM; + goto unlock_and_exit; + } + } + + /* verify that MHISTATUS is configured to RESET*/ + mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, + MHISTATUS, MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + + mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &is_ready); + + if (state != MHI_DEV_RESET_STATE || is_ready) { + MHI_SM_ERR("Cannot switch to READY, MHI is not in RESET state"); + MHI_SM_ERR("-MHISTATE: %s, READY bit: 0x%x\n", + mhi_sm_mstate_str(state), is_ready); + res = -EFAULT; + goto unlock_and_exit; + } + mhi_sm_mmio_set_mhistatus(MHI_DEV_READY_STATE); + res = 0; + +unlock_and_exit: + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_set_ready); + +/** + * mhi_dev_notify_sm_event() - MHI-core notify SM on trigger occurred + * @event - enum of the requierd operation. + * + * Asynchronic function. + * No trigger is sent after operation is done. 
+ * + * Return: 0: success + * -EFAULT: SM isn't initialized or event isn't supported + * -ENOMEM: allocating memory error + * -EINVAL: invalied event + */ +int mhi_dev_notify_sm_event(enum mhi_dev_event event) +{ + struct mhi_sm_device_event *state_change_event; + int res; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return -EFAULT; + } + + MHI_SM_DBG("received: %s\n", + mhi_sm_dev_event_str(event)); + + switch (event) { + case MHI_DEV_EVENT_M0_STATE: + mhi_sm_ctx->stats.m0_event_cnt++; + break; + case MHI_DEV_EVENT_M3_STATE: + mhi_sm_ctx->stats.m3_event_cnt++; + break; + case MHI_DEV_EVENT_HW_ACC_WAKEUP: + mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt++; + break; + case MHI_DEV_EVENT_CORE_WAKEUP: + mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt++; + break; + case MHI_DEV_EVENT_CTRL_TRIG: + case MHI_DEV_EVENT_M1_STATE: + case MHI_DEV_EVENT_M2_STATE: + MHI_SM_ERR("Not supported event: %s\n", + mhi_sm_dev_event_str(event)); + res = -EFAULT; + goto exit; + default: + MHI_SM_ERR("Invalid event, received: 0x%x event\n", event); + res = -EINVAL; + goto exit; + } + + /*init work and push to queue*/ + state_change_event = kzalloc(sizeof(*state_change_event), GFP_ATOMIC); + if (!state_change_event) { + MHI_SM_ERR("kzalloc error\n"); + res = -ENOMEM; + goto exit; + } + + state_change_event->event = event; + INIT_WORK(&state_change_event->work, mhi_sm_dev_event_manager); + atomic_inc(&mhi_sm_ctx->pending_device_events); + queue_work(mhi_sm_ctx->mhi_sm_wq, &state_change_event->work); + res = 0; + +exit: + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_notify_sm_event); + +/** + * mhi_dev_sm_pcie_handler() - handler of ep_pcie events + * @notify - pointer to structure contains the ep_pcie event + * + * Callback function, called by ep_pcie driver to notify on pcie state change + * Asynchronic function + */ +void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify) +{ + struct mhi_sm_ep_pcie_event 
*dstate_change_evt; + enum ep_pcie_event event; + unsigned long flags; + + MHI_SM_FUNC_ENTRY(); + + if (WARN_ON(!notify)) { + MHI_SM_ERR("Null argument - notify\n"); + return; + } + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return; + } + + event = notify->event; + MHI_SM_DBG("received: %s\n", + mhi_sm_pcie_event_str(event)); + + dstate_change_evt = kzalloc(sizeof(*dstate_change_evt), GFP_ATOMIC); + if (!dstate_change_evt) + goto exit; + + switch (event) { + case EP_PCIE_EVENT_LINKUP: + mhi_sm_ctx->stats.linkup_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D3_COLD: + mhi_sm_ctx->stats.d3_cold_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D3_HOT: + mhi_sm_ctx->stats.d3_hot_event_cnt++; + + spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags); + if ((mhi_sm_ctx->mhi_dev->mhi_int) && + (mhi_sm_ctx->mhi_dev->mhi_int_en)) { + disable_irq(mhi_sm_ctx->mhi_dev->mhi_irq); + mhi_sm_ctx->mhi_dev->mhi_int_en = false; + MHI_SM_DBG("Disable MHI IRQ during D3 HOT"); + } + spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags); + + mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev); + break; + case EP_PCIE_EVENT_PM_RST_DEAST: + mhi_sm_ctx->stats.rst_deast_event_cnt++; + break; + case EP_PCIE_EVENT_PM_D0: + mhi_sm_ctx->stats.d0_event_cnt++; + + spin_lock_irqsave(&mhi_sm_ctx->mhi_dev->lock, flags); + if ((mhi_sm_ctx->mhi_dev->mhi_int) && + (!mhi_sm_ctx->mhi_dev->mhi_int_en)) { + enable_irq(mhi_sm_ctx->mhi_dev->mhi_irq); + mhi_sm_ctx->mhi_dev->mhi_int_en = true; + MHI_SM_DBG("Enable MHI IRQ during D0"); + } + spin_unlock_irqrestore(&mhi_sm_ctx->mhi_dev->lock, flags); + break; + case EP_PCIE_EVENT_LINKDOWN: + mhi_sm_ctx->stats.linkdown_event_cnt++; + mhi_sm_ctx->syserr_occurred = true; + MHI_SM_ERR("got %s, ERROR occurred\n", + mhi_sm_pcie_event_str(event)); + break; + case EP_PCIE_EVENT_MHI_A7: + ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle, + EP_PCIE_INT_EVT_MHI_A7, false); + mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev); + kfree(dstate_change_evt); + 
goto exit; + default: + MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n", + event); + kfree(dstate_change_evt); + goto exit; + } + + dstate_change_evt->event = event; + INIT_WORK(&dstate_change_evt->work, mhi_sm_pcie_event_manager); + queue_work(mhi_sm_ctx->mhi_sm_wq, &dstate_change_evt->work); + atomic_inc(&mhi_sm_ctx->pending_pcie_events); + +exit: + MHI_SM_FUNC_EXIT(); +} +EXPORT_SYMBOL(mhi_dev_sm_pcie_handler); + +/** + * mhi_dev_sm_syserr() - switch to system error state. + * + * Called on system error condition. + * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device. + * Synchronic function. + * + * Return: 0: success + * negative: failure + */ +int mhi_dev_sm_syserr(void) +{ + int res; + + MHI_SM_FUNC_ENTRY(); + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Failed, MHI SM is not initialized\n"); + return -EFAULT; + } + + mutex_lock(&mhi_sm_ctx->mhi_state_lock); + res = mhi_sm_handle_syserr(); + if (res) + MHI_SM_ERR("mhi_sm_handle_syserr failed %d\n", res); + mutex_unlock(&mhi_sm_ctx->mhi_state_lock); + + MHI_SM_FUNC_EXIT(); + return res; +} +EXPORT_SYMBOL(mhi_dev_sm_syserr); + +static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!mhi_sm_ctx) { + nbytes = scnprintf(dbg_buff, MHI_SM_MAX_MSG_LEN, + "Not initialized\n"); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "*************** MHI State machine status ***************\n"); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D state: %s\n", + mhi_sm_dstate_str(mhi_sm_ctx->d_state)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M state: %s\n", + mhi_sm_mstate_str(mhi_sm_ctx->mhi_state)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "pending device events: %d\n", + atomic_read(&mhi_sm_ctx->pending_device_events)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "pending 
pcie events: %d\n", + atomic_read(&mhi_sm_ctx->pending_pcie_events)); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "*************** Statistics ***************\n"); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M0 events: %d\n", mhi_sm_ctx->stats.m0_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "M3 events: %d\n", mhi_sm_ctx->stats.m3_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "HW_ACC wakeup events: %d\n", + mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "CORE wakeup events: %d\n", + mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "Linkup events: %d\n", + mhi_sm_ctx->stats.linkup_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "De-assert PERST events: %d\n", + mhi_sm_ctx->stats.rst_deast_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D0 events: %d\n", + mhi_sm_ctx->stats.d0_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D3_HOT events: %d\n", + mhi_sm_ctx->stats.d3_hot_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "D3_COLD events:%d\n", + mhi_sm_ctx->stats.d3_cold_event_cnt); + nbytes += scnprintf(dbg_buff + nbytes, + MHI_SM_MAX_MSG_LEN - nbytes, + "Linkdown events: %d\n", + mhi_sm_ctx->stats.linkdown_event_cnt); + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t mhi_sm_debugfs_write(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (!mhi_sm_ctx) { + MHI_SM_ERR("Not initialized\n"); + return -EFAULT; + } + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, 
count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + + switch (in_num) { + case 0: + if (atomic_read(&mhi_sm_ctx->pending_device_events) || + atomic_read(&mhi_sm_ctx->pending_pcie_events)) + MHI_SM_DBG("Note, there are pending events in sm_wq\n"); + + memset(&mhi_sm_ctx->stats, 0, sizeof(struct mhi_sm_stats)); + break; + default: + MHI_SM_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + + return count; +} diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.h b/drivers/platform/msm/mhi_dev/mhi_sm.h new file mode 100644 index 0000000000000000000000000000000000000000..4b9307d6c71d3a29a2f7d519f454275200a3c162 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_sm.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2015,2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef MHI_SM_H +#define MHI_SM_H + +#include "mhi.h" +#include +#include + + +/** + * enum mhi_dev_event - MHI state change events + * @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event. + * Not supported,for future use + * @MHI_DEV_EVENT_M0_STATE: M0 state change event + * @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use + * @MHI_DEV_EVENT_M2_STATE: M2 state change event. 
Not supported, for future use + * @MHI_DEV_EVENT_M3_STATE: M0 state change event + * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pendding data on IPA, initiate Host wakeup + * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiate Host wakup + */ +enum mhi_dev_event { + MHI_DEV_EVENT_CTRL_TRIG, + MHI_DEV_EVENT_M0_STATE, + MHI_DEV_EVENT_M1_STATE, + MHI_DEV_EVENT_M2_STATE, + MHI_DEV_EVENT_M3_STATE, + MHI_DEV_EVENT_HW_ACC_WAKEUP, + MHI_DEV_EVENT_CORE_WAKEUP, + MHI_DEV_EVENT_MAX +}; + +int mhi_dev_sm_init(struct mhi_dev *dev); +int mhi_dev_sm_exit(struct mhi_dev *dev); +int mhi_dev_sm_set_ready(void); +int mhi_dev_notify_sm_event(enum mhi_dev_event event); +int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state); +int mhi_dev_sm_syserr(void); +void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify); + +#endif /* MHI_SM_H */ + diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c new file mode 100644 index 0000000000000000000000000000000000000000..0d85cc6b9ac79998199b3c0c1685d38fa58ea812 --- /dev/null +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -0,0 +1,1459 @@ +/* Copyright (c) 2015,2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" + +#define MHI_SOFTWARE_CLIENT_START 0 +#define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2) +#define MHI_UCI_IPC_LOG_PAGES (100) + +/* Max number of MHI write request structures (used in async writes) */ +#define MAX_UCI_WR_REQ 10 +#define MAX_NR_TRBS_PER_CHAN 9 +#define MHI_QTI_IFACE_ID 4 +#define DEVICE_NAME "mhi" +#define MAX_DEVICE_NAME_SIZE 80 + +#define MHI_UCI_ASYNC_READ_TIMEOUT msecs_to_jiffies(100) + +enum uci_dbg_level { + UCI_DBG_VERBOSE = 0x0, + UCI_DBG_INFO = 0x1, + UCI_DBG_DBG = 0x2, + UCI_DBG_WARNING = 0x3, + UCI_DBG_ERROR = 0x4, + UCI_DBG_CRITICAL = 0x5, + UCI_DBG_reserved = 0x80000000 +}; + +static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL; +static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO; +static void *mhi_uci_ipc_log; + + +enum mhi_chan_dir { + MHI_DIR_INVALID = 0x0, + MHI_DIR_OUT = 0x1, + MHI_DIR_IN = 0x2, + MHI_DIR__reserved = 0x80000000 +}; + +struct chan_attr { + /* SW maintained channel id */ + enum mhi_client_channel chan_id; + /* maximum buffer size for this channel */ + size_t max_packet_size; + /* number of buffers supported in this channel */ + u32 nr_trbs; + /* direction of the channel, see enum mhi_chan_dir */ + enum mhi_chan_dir dir; + /* need to register mhi channel state change callback */ + bool register_cb; + /* Name of char device */ + char *device_name; +}; + +/* UCI channel attributes table */ +static const struct chan_attr uci_chan_attr_table[] = { + { + MHI_CLIENT_LOOPBACK_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_LOOPBACK_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_SAHARA_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + 
MHI_CLIENT_SAHARA_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_EFS_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_EFS_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_MBIM_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_MBIM_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_QMI_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_QMI_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_IP_CTRL_0_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_IP_CTRL_0_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_IP_CTRL_1_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_IP_CTRL_1_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_DUN_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + false, + NULL + }, + { + MHI_CLIENT_DUN_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + false, + NULL + }, + { + MHI_CLIENT_ADB_OUT, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_OUT, + true, + NULL + }, + { + MHI_CLIENT_ADB_IN, + TRB_MAX_DATA_SIZE, + MAX_NR_TRBS_PER_CHAN, + MHI_DIR_IN, + true, + "android_adb" + }, +}; + +struct uci_ctrl { + wait_queue_head_t ctrl_wq; + struct mhi_uci_ctxt_t *uci_ctxt; + atomic_t ctrl_data_update; +}; + +struct uci_client { + u32 client_index; + /* write channel - always odd*/ + u32 out_chan; + /* read channel - always even */ + u32 in_chan; + struct mhi_dev_client *out_handle; + struct mhi_dev_client *in_handle; + const struct chan_attr *in_chan_attr; + 
const struct chan_attr *out_chan_attr; + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + atomic_t read_data_ready; + struct device *dev; + atomic_t ref_count; + int mhi_status; + void *pkt_loc; + size_t pkt_size; + struct mhi_dev_iov *in_buf_list; + atomic_t write_data_ready; + atomic_t mhi_chans_open; + struct mhi_uci_ctxt_t *uci_ctxt; + struct mutex in_chan_lock; + struct mutex out_chan_lock; + spinlock_t wr_req_lock; + unsigned int f_flags; + struct mhi_req *wreqs; + struct list_head wr_req_list; + struct completion read_done; + int (*send)(struct uci_client*, void*, u32); + int (*read)(struct uci_client*, struct mhi_req*, int*); +}; + +struct mhi_uci_ctxt_t { + struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT]; + struct uci_ctrl ctrl_handle; + void (*event_notifier)(struct mhi_dev_client_cb_reason *cb); + dev_t start_ctrl_nr; + struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS]; + dev_t ctrl_nr; + struct cdev *cdev_ctrl; + struct device *dev; + struct class *mhi_uci_class; + atomic_t mhi_disabled; + atomic_t mhi_enable_notif_wq_active; +}; + +#define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2) +#define CLIENT_TO_CHAN(_CLIENT_NR) (_CLIENT_NR * 2) + +#define uci_log(_msg_lvl, _msg, ...) 
do { \ + if (_msg_lvl >= mhi_uci_msg_lvl) { \ + pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \ + } \ + if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \ + ipc_log_string(mhi_uci_ipc_log, \ + "[%s] " _msg, __func__, ##__VA_ARGS__); \ + } \ +} while (0) + + +module_param(mhi_uci_msg_lvl, uint, 0644); +MODULE_PARM_DESC(mhi_uci_msg_lvl, "uci dbg lvl"); + +module_param(mhi_uci_ipc_log_lvl, uint, 0644); +MODULE_PARM_DESC(mhi_uci_ipc_log_lvl, "ipc dbg lvl"); + +static ssize_t mhi_uci_client_read(struct file *file, char __user *buf, + size_t count, loff_t *offp); +static ssize_t mhi_uci_ctrl_client_read(struct file *file, char __user *buf, + size_t count, loff_t *offp); +static ssize_t mhi_uci_client_write(struct file *file, + const char __user *buf, size_t count, loff_t *offp); +static int mhi_uci_client_open(struct inode *mhi_inode, struct file*); +static int mhi_uci_ctrl_open(struct inode *mhi_inode, struct file*); +static int mhi_uci_client_release(struct inode *mhi_inode, + struct file *file_handle); +static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait); +static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait); +static struct mhi_uci_ctxt_t uci_ctxt; + +static int mhi_init_read_chan(struct uci_client *client_handle, + enum mhi_client_channel chan) +{ + int rc = 0; + u32 i, j; + const struct chan_attr *in_chan_attr; + size_t buf_size; + void *data_loc; + + if (client_handle == NULL) { + uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n"); + return -EINVAL; + } + if (chan >= MHI_MAX_SOFTWARE_CHANNELS) { + uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan); + return -EINVAL; + } + + in_chan_attr = client_handle->in_chan_attr; + if (!in_chan_attr) { + uci_log(UCI_DBG_ERROR, "Null channel attributes for chan %d\n", + client_handle->in_chan); + return -EINVAL; + } + + /* Init the completion event for read */ + init_completion(&client_handle->read_done); + + buf_size = in_chan_attr->max_packet_size; + for (i 
= 0; i < (in_chan_attr->nr_trbs); i++) { + data_loc = kmalloc(buf_size, GFP_KERNEL); + if (!data_loc) { + rc = -ENOMEM; + goto free_memory; + } + client_handle->in_buf_list[i].addr = data_loc; + client_handle->in_buf_list[i].buf_size = buf_size; + } + + return rc; + +free_memory: + for (j = 0; j < i; j++) + kfree(client_handle->in_buf_list[j].addr); + + return rc; +} + +static void mhi_uci_write_completion_cb(void *req) +{ + struct mhi_req *ureq = req; + struct uci_client *uci_handle; + unsigned long flags; + + uci_handle = (struct uci_client *)ureq->context; + kfree(ureq->buf); + ureq->buf = NULL; + + spin_lock_irqsave(&uci_handle->wr_req_lock, flags); + list_add_tail(&ureq->list, &uci_handle->wr_req_list); + spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags); +} + +static void mhi_uci_read_completion_cb(void *req) +{ + struct mhi_req *ureq = req; + struct uci_client *uci_handle; + + uci_handle = (struct uci_client *)ureq->context; + complete(&uci_handle->read_done); +} + +static int mhi_uci_send_sync(struct uci_client *uci_handle, + void *data_loc, u32 size) +{ + struct mhi_req ureq; + int ret_val; + + ureq.client = uci_handle->out_handle; + ureq.buf = data_loc; + ureq.len = size; + ureq.chan = uci_handle->out_chan; + ureq.mode = IPA_DMA_SYNC; + + ret_val = mhi_dev_write_channel(&ureq); + + kfree(data_loc); + return ret_val; +} + +static int mhi_uci_send_async(struct uci_client *uci_handle, + void *data_loc, u32 size) +{ + int bytes_to_write; + struct mhi_req *ureq; + + uci_log(UCI_DBG_VERBOSE, + "Got async write for ch %d of size %d\n", + uci_handle->out_chan, size); + + spin_lock_irq(&uci_handle->wr_req_lock); + if (list_empty(&uci_handle->wr_req_list)) { + uci_log(UCI_DBG_ERROR, "Write request pool empty\n"); + spin_unlock_irq(&uci_handle->wr_req_lock); + return -ENOMEM; + } + ureq = container_of(uci_handle->wr_req_list.next, + struct mhi_req, list); + list_del_init(&ureq->list); + spin_unlock_irq(&uci_handle->wr_req_lock); + + ureq->client = 
uci_handle->out_handle; + ureq->context = uci_handle; + ureq->buf = data_loc; + ureq->len = size; + ureq->chan = uci_handle->out_chan; + ureq->mode = IPA_DMA_ASYNC; + ureq->client_cb = mhi_uci_write_completion_cb; + ureq->snd_cmpl = 1; + + bytes_to_write = mhi_dev_write_channel(ureq); + if (bytes_to_write != size) + goto error_async_transfer; + + return bytes_to_write; + +error_async_transfer: + kfree(data_loc); + ureq->buf = NULL; + spin_lock_irq(&uci_handle->wr_req_lock); + list_add_tail(&ureq->list, &uci_handle->wr_req_list); + spin_unlock_irq(&uci_handle->wr_req_lock); + + return bytes_to_write; +} + +static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, + const char __user *buf, u32 size) +{ + void *data_loc; + unsigned long memcpy_result; + struct uci_client *uci_handle; + + if (!client_handle || !buf || !size) + return -EINVAL; + + if (size > TRB_MAX_DATA_SIZE) { + uci_log(UCI_DBG_ERROR, + "Too big write size: %d, max supported size is %d\n", + size, TRB_MAX_DATA_SIZE); + return -EFBIG; + } + + uci_handle = container_of(client_handle, struct uci_client, + out_handle); + data_loc = kmalloc(size, GFP_KERNEL); + if (!data_loc) { + uci_log(UCI_DBG_ERROR, + "Failed to allocate kernel buf for user requested size 0x%x\n", + size); + return -ENOMEM; + } + memcpy_result = copy_from_user(data_loc, buf, size); + if (memcpy_result) + goto error_memcpy; + + return uci_handle->send(uci_handle, data_loc, size); + +error_memcpy: + kfree(data_loc); + return -EFAULT; +} + +static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + struct uci_ctrl *uci_ctrl_handle; + + uci_ctrl_handle = file->private_data; + + if (!uci_ctrl_handle) + return -ENODEV; + + poll_wait(file, &uci_ctrl_handle->ctrl_wq, wait); + if (!atomic_read(&uci_ctxt.mhi_disabled) && + atomic_read(&uci_ctrl_handle->ctrl_data_update)) { + uci_log(UCI_DBG_VERBOSE, "Client can read ctrl_state"); + mask |= POLLIN | POLLRDNORM; + } + + 
uci_log(UCI_DBG_VERBOSE, + "Client attempted to poll ctrl returning mask 0x%x\n", + mask); + + return mask; +} + +static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + struct uci_client *uci_handle; + + uci_handle = file->private_data; + + if (!uci_handle) + return -ENODEV; + + poll_wait(file, &uci_handle->read_wq, wait); + poll_wait(file, &uci_handle->write_wq, wait); + if (!atomic_read(&uci_ctxt.mhi_disabled) && + !mhi_dev_channel_isempty(uci_handle->in_handle)) { + uci_log(UCI_DBG_VERBOSE, + "Client can read chan %d\n", uci_handle->in_chan); + mask |= POLLIN | POLLRDNORM; + } + if (!atomic_read(&uci_ctxt.mhi_disabled) && + !mhi_dev_channel_isempty(uci_handle->out_handle)) { + uci_log(UCI_DBG_VERBOSE, + "Client can write chan %d\n", uci_handle->out_chan); + mask |= POLLOUT | POLLWRNORM; + } + + uci_log(UCI_DBG_VERBOSE, + "Client attempted to poll chan %d, returning mask 0x%x\n", + uci_handle->in_chan, mask); + return mask; +} + +static int mhi_uci_alloc_write_reqs(struct uci_client *client) +{ + int i; + + client->wreqs = kcalloc(MAX_UCI_WR_REQ, + sizeof(struct mhi_req), + GFP_KERNEL); + if (!client->wreqs) { + uci_log(UCI_DBG_ERROR, "Write reqs alloc failed\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&client->wr_req_list); + for (i = 0; i < MAX_UCI_WR_REQ; ++i) + list_add_tail(&client->wreqs[i].list, &client->wr_req_list); + + uci_log(UCI_DBG_INFO, + "UCI write reqs allocation successful\n"); + return 0; +} + +static int mhi_uci_read_async(struct uci_client *uci_handle, + struct mhi_req *ureq, int *bytes_avail) +{ + int ret_val = 0; + unsigned long compl_ret; + + uci_log(UCI_DBG_ERROR, + "Async read for ch %d\n", uci_handle->in_chan); + + ureq->mode = IPA_DMA_ASYNC; + ureq->client_cb = mhi_uci_read_completion_cb; + ureq->snd_cmpl = 1; + ureq->context = uci_handle; + + reinit_completion(&uci_handle->read_done); + + *bytes_avail = mhi_dev_read_channel(ureq); + uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x 
bytes_read = 0x%x\n", + ureq->len, *bytes_avail); + if (*bytes_avail < 0) { + uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n", + *bytes_avail); + return -EIO; + } + + if (*bytes_avail > 0) { + uci_log(UCI_DBG_VERBOSE, + "Waiting for async read completion!\n"); + compl_ret = + wait_for_completion_interruptible_timeout( + &uci_handle->read_done, + MHI_UCI_ASYNC_READ_TIMEOUT); + + if (compl_ret == -ERESTARTSYS) { + uci_log(UCI_DBG_ERROR, "Exit signal caught\n"); + return compl_ret; + } else if (compl_ret == 0) { + uci_log(UCI_DBG_ERROR, "Read timed out for ch %d\n", + uci_handle->in_chan); + return -EIO; + } + uci_log(UCI_DBG_VERBOSE, + "wk up Read completed on ch %d\n", ureq->chan); + + uci_handle->pkt_loc = (void *)ureq->buf; + uci_handle->pkt_size = ureq->actual_len; + + uci_log(UCI_DBG_VERBOSE, + "Got pkt of sz 0x%x at adr %pK, ch %d\n", + uci_handle->pkt_size, + ureq->buf, ureq->chan); + } else { + uci_handle->pkt_loc = NULL; + uci_handle->pkt_size = 0; + } + + return ret_val; +} + +static int mhi_uci_read_sync(struct uci_client *uci_handle, + struct mhi_req *ureq, int *bytes_avail) +{ + int ret_val = 0; + + ureq->mode = IPA_DMA_SYNC; + *bytes_avail = mhi_dev_read_channel(ureq); + + uci_log(UCI_DBG_VERBOSE, "buf_size = 0x%x bytes_read = 0x%x\n", + ureq->len, *bytes_avail); + + if (*bytes_avail < 0) { + uci_log(UCI_DBG_ERROR, "Failed to read channel ret %d\n", + *bytes_avail); + return -EIO; + } + + if (*bytes_avail > 0) { + uci_handle->pkt_loc = (void *)ureq->buf; + uci_handle->pkt_size = ureq->actual_len; + + uci_log(UCI_DBG_VERBOSE, + "Got pkt of sz 0x%x at adr %pK, ch %d\n", + uci_handle->pkt_size, + ureq->buf, ureq->chan); + } else { + uci_handle->pkt_loc = NULL; + uci_handle->pkt_size = 0; + } + + return ret_val; +} + +static int open_client_mhi_channels(struct uci_client *uci_client) +{ + int rc = 0; + + uci_log(UCI_DBG_DBG, + "Starting channels %d %d.\n", + uci_client->out_chan, + uci_client->in_chan); + mutex_lock(&uci_client->out_chan_lock); + 
mutex_lock(&uci_client->in_chan_lock); + + /* Allocate write requests for async operations */ + if (!(uci_client->f_flags & O_SYNC)) { + rc = mhi_uci_alloc_write_reqs(uci_client); + if (rc) + goto handle_not_rdy_err; + uci_client->send = mhi_uci_send_async; + uci_client->read = mhi_uci_read_async; + } else { + uci_client->send = mhi_uci_send_sync; + uci_client->read = mhi_uci_read_sync; + } + + uci_log(UCI_DBG_DBG, + "Initializing inbound chan %d.\n", + uci_client->in_chan); + rc = mhi_init_read_chan(uci_client, uci_client->in_chan); + if (rc < 0) + uci_log(UCI_DBG_ERROR, + "Failed to init inbound 0x%x, ret 0x%x\n", + uci_client->in_chan, rc); + + rc = mhi_dev_open_channel(uci_client->out_chan, + &uci_client->out_handle, + uci_ctxt.event_notifier); + if (rc < 0) + goto handle_not_rdy_err; + + rc = mhi_dev_open_channel(uci_client->in_chan, + &uci_client->in_handle, + uci_ctxt.event_notifier); + if (rc < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to open chan %d, ret 0x%x\n", + uci_client->out_chan, rc); + goto handle_in_err; + } + atomic_set(&uci_client->mhi_chans_open, 1); + mutex_unlock(&uci_client->in_chan_lock); + mutex_unlock(&uci_client->out_chan_lock); + + return 0; + +handle_in_err: + mhi_dev_close_channel(uci_client->out_handle); +handle_not_rdy_err: + mutex_unlock(&uci_client->in_chan_lock); + mutex_unlock(&uci_client->out_chan_lock); + return rc; +} + +static int mhi_uci_ctrl_open(struct inode *inode, + struct file *file_handle) +{ + struct uci_ctrl *uci_ctrl_handle; + + uci_log(UCI_DBG_DBG, "Client opened ctrl file device node\n"); + + uci_ctrl_handle = &uci_ctxt.ctrl_handle; + if (!uci_ctrl_handle) + return -EINVAL; + + file_handle->private_data = uci_ctrl_handle; + + return 0; +} + +static int mhi_uci_client_open(struct inode *mhi_inode, + struct file *file_handle) +{ + struct uci_client *uci_handle; + int rc = 0; + + rc = iminor(mhi_inode); + if (rc < MHI_SOFTWARE_CLIENT_LIMIT) { + uci_handle = + &uci_ctxt.client_handles[iminor(mhi_inode)]; + } else { + 
uci_log(UCI_DBG_DBG, + "Cannot open struct device node 0x%x\n", iminor(mhi_inode)); + return -EINVAL; + } + + uci_log(UCI_DBG_DBG, + "Client opened struct device node 0x%x, ref count 0x%x\n", + iminor(mhi_inode), atomic_read(&uci_handle->ref_count)); + if (atomic_add_return(1, &uci_handle->ref_count) == 1) { + if (!uci_handle) { + atomic_dec(&uci_handle->ref_count); + return -ENOMEM; + } + uci_handle->uci_ctxt = &uci_ctxt; + uci_handle->f_flags = file_handle->f_flags; + if (!atomic_read(&uci_handle->mhi_chans_open)) { + uci_log(UCI_DBG_INFO, + "Opening channels client %d\n", + iminor(mhi_inode)); + rc = open_client_mhi_channels(uci_handle); + if (rc) { + uci_log(UCI_DBG_INFO, + "Failed to open channels ret %d\n", rc); + return rc; + } + } + } + file_handle->private_data = uci_handle; + + return 0; + +} + +static int mhi_uci_client_release(struct inode *mhi_inode, + struct file *file_handle) +{ + struct uci_client *uci_handle = file_handle->private_data; + int rc = 0; + + if (!uci_handle) + return -EINVAL; + + if (atomic_sub_return(1, &uci_handle->ref_count) == 0) { + uci_log(UCI_DBG_DBG, + "Last client left, closing channel 0x%x\n", + iminor(mhi_inode)); + if (atomic_read(&uci_handle->mhi_chans_open)) { + atomic_set(&uci_handle->mhi_chans_open, 0); + + if (!(uci_handle->f_flags & O_SYNC)) + kfree(uci_handle->wreqs); + mutex_lock(&uci_handle->out_chan_lock); + rc = mhi_dev_close_channel(uci_handle->out_handle); + wake_up(&uci_handle->write_wq); + mutex_unlock(&uci_handle->out_chan_lock); + + mutex_lock(&uci_handle->in_chan_lock); + rc = mhi_dev_close_channel(uci_handle->in_handle); + wake_up(&uci_handle->read_wq); + mutex_unlock(&uci_handle->in_chan_lock); + + } + atomic_set(&uci_handle->read_data_ready, 0); + atomic_set(&uci_handle->write_data_ready, 0); + file_handle->private_data = NULL; + } else { + uci_log(UCI_DBG_DBG, + "Client close chan %d, ref count 0x%x\n", + iminor(mhi_inode), + atomic_read(&uci_handle->ref_count)); + } + return rc; +} + +static void 
mhi_parse_state(char *buf, int *nbytes, uint32_t info) +{ + switch (info) { + case MHI_STATE_CONNECTED: + *nbytes = scnprintf(buf, MHI_CTRL_STATE, + "CONNECTED"); + break; + case MHI_STATE_DISCONNECTED: + *nbytes = scnprintf(buf, MHI_CTRL_STATE, + "DISCONNECTED"); + break; + case MHI_STATE_CONFIGURED: + default: + *nbytes = scnprintf(buf, MHI_CTRL_STATE, + "CONFIGURED"); + break; + } +} + +static int mhi_state_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + int rc, nbytes = 0; + uint32_t info = 0; + char buf[MHI_CTRL_STATE]; + + rc = mhi_ctrl_state_info(MHI_DEV_UEVENT_CTRL, &info); + if (rc) { + pr_err("Failed to obtain MHI_STATE\n"); + return -EINVAL; + } + + mhi_parse_state(buf, &nbytes, info); + add_uevent_var(env, "MHI_STATE=%s", buf); + + rc = mhi_ctrl_state_info(MHI_CLIENT_QMI_OUT, &info); + if (rc) { + pr_err("Failed to obtain channel 14 state\n"); + return -EINVAL; + } + nbytes = 0; + mhi_parse_state(buf, &nbytes, info); + add_uevent_var(env, "MHI_CHANNEL_STATE_14=%s", buf); + + rc = mhi_ctrl_state_info(MHI_CLIENT_MBIM_OUT, &info); + if (rc) { + pr_err("Failed to obtain channel 12 state\n"); + return -EINVAL; + } + nbytes = 0; + mhi_parse_state(buf, &nbytes, info); + add_uevent_var(env, "MHI_CHANNEL_STATE_12=%s", buf); + + return 0; +} + +static ssize_t mhi_uci_ctrl_client_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *offp) +{ + uint32_t rc = 0, info; + int nbytes, size; + char buf[MHI_CTRL_STATE]; + struct uci_ctrl *uci_ctrl_handle = NULL; + + if (!file || !user_buf || !count || + (count < MHI_CTRL_STATE) || !file->private_data) + return -EINVAL; + + uci_ctrl_handle = file->private_data; + rc = mhi_ctrl_state_info(MHI_CLIENT_QMI_OUT, &info); + if (rc) + return -EINVAL; + + switch (info) { + case MHI_STATE_CONFIGURED: + nbytes = scnprintf(buf, sizeof(buf), + "MHI_STATE=CONFIGURED"); + break; + case MHI_STATE_CONNECTED: + nbytes = scnprintf(buf, sizeof(buf), + "MHI_STATE=CONNECTED"); + break; + case 
MHI_STATE_DISCONNECTED: + nbytes = scnprintf(buf, sizeof(buf), + "MHI_STATE=DISCONNECTED"); + break; + default: + pr_err("invalid info:%d\n", info); + return -EINVAL; + } + + + size = simple_read_from_buffer(user_buf, count, offp, buf, nbytes); + + atomic_set(&uci_ctrl_handle->ctrl_data_update, 0); + + if (size == 0) + *offp = 0; + + return size; +} + +static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf, + size_t uspace_buf_size, loff_t *bytes_pending) +{ + struct uci_client *uci_handle = NULL; + struct mhi_dev_client *client_handle = NULL; + int bytes_avail = 0; + int ret_val = 0; + struct mutex *mutex; + ssize_t bytes_copied = 0; + u32 addr_offset = 0; + struct mhi_req ureq; + + if (!file || !ubuf || !uspace_buf_size || + !file->private_data) + return -EINVAL; + + uci_handle = file->private_data; + client_handle = uci_handle->in_handle; + mutex = &uci_handle->in_chan_lock; + ureq.chan = uci_handle->in_chan; + + mutex_lock(mutex); + ureq.client = client_handle; + ureq.buf = uci_handle->in_buf_list[0].addr; + ureq.len = uci_handle->in_buf_list[0].buf_size; + + + uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", + ureq.chan); + do { + if (!uci_handle->pkt_loc && + !atomic_read(&uci_ctxt.mhi_disabled)) { + ret_val = uci_handle->read(uci_handle, &ureq, + &bytes_avail); + if (ret_val) + goto error; + if (bytes_avail > 0) + *bytes_pending = (loff_t)uci_handle->pkt_size; + } + if (bytes_avail == 0) { + + /* If nothing was copied yet, wait for data */ + uci_log(UCI_DBG_VERBOSE, + "No data read_data_ready %d, chan %d\n", + atomic_read(&uci_handle->read_data_ready), + ureq.chan); + if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) { + ret_val = -EAGAIN; + goto error; + } + ret_val = wait_event_interruptible(uci_handle->read_wq, + (!mhi_dev_channel_isempty(client_handle))); + + if (ret_val == -ERESTARTSYS) { + uci_log(UCI_DBG_ERROR, "Exit signal caught\n"); + goto error; + } + uci_log(UCI_DBG_VERBOSE, + "wk up Got data on ch %d 
read_data_ready %d\n", + ureq.chan, + atomic_read(&uci_handle->read_data_ready)); + + /* A valid packet was returned from MHI */ + } else if (bytes_avail > 0) { + uci_log(UCI_DBG_VERBOSE, + "Got packet: avail pkts %d phy_adr %pK, ch %d\n", + atomic_read(&uci_handle->read_data_ready), + ureq.buf, + ureq.chan); + break; + /* + * MHI did not return a valid packet, but we have one + * which we did not finish returning to user + */ + } else { + uci_log(UCI_DBG_CRITICAL, + "chan %d err: avail pkts %d phy_adr %pK", + ureq.chan, + atomic_read(&uci_handle->read_data_ready), + ureq.buf); + return -EIO; + } + } while (!uci_handle->pkt_loc); + + if (uspace_buf_size >= *bytes_pending) { + addr_offset = uci_handle->pkt_size - *bytes_pending; + if (copy_to_user(ubuf, uci_handle->pkt_loc + addr_offset, + *bytes_pending)) { + ret_val = -EIO; + goto error; + } + + bytes_copied = *bytes_pending; + *bytes_pending = 0; + uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n", + bytes_copied, (u32)*bytes_pending, ureq.chan); + } else { + addr_offset = uci_handle->pkt_size - *bytes_pending; + if (copy_to_user(ubuf, (void *) (uintptr_t)uci_handle->pkt_loc + + addr_offset, uspace_buf_size)) { + ret_val = -EIO; + goto error; + } + bytes_copied = uspace_buf_size; + *bytes_pending -= uspace_buf_size; + uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n", + bytes_copied, + (u32)*bytes_pending, + ureq.chan); + } + /* We finished with this buffer, map it back */ + if (*bytes_pending == 0) { + uci_log(UCI_DBG_VERBOSE, + "All data consumed. 
Pkt loc %pK ,chan %d\n", + uci_handle->pkt_loc, ureq.chan); + uci_handle->pkt_loc = 0; + uci_handle->pkt_size = 0; + } + uci_log(UCI_DBG_VERBOSE, + "Returning 0x%x bytes, 0x%x bytes left\n", + bytes_copied, (u32)*bytes_pending); + mutex_unlock(mutex); + return bytes_copied; +error: + mutex_unlock(mutex); + uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val); + return ret_val; +} + +static ssize_t mhi_uci_client_write(struct file *file, + const char __user *buf, + size_t count, loff_t *offp) +{ + struct uci_client *uci_handle = NULL; + int ret_val = 0; + u32 chan = 0xFFFFFFFF; + + if (file == NULL || buf == NULL || + !count || file->private_data == NULL) + return -EINVAL; + + uci_handle = file->private_data; + + if (atomic_read(&uci_ctxt.mhi_disabled)) { + uci_log(UCI_DBG_ERROR, + "Client %d attempted to write while MHI is disabled\n", + uci_handle->out_chan); + return -EIO; + } + chan = uci_handle->out_chan; + mutex_lock(&uci_handle->out_chan_lock); + while (!ret_val) { + ret_val = mhi_uci_send_packet(&uci_handle->out_handle, + buf, count); + if (ret_val < 0) { + uci_log(UCI_DBG_ERROR, + "Error while writing data to MHI, chan %d, buf %pK, size %d\n", + chan, (void *)buf, count); + ret_val = -EIO; + break; + } + if (!ret_val) { + uci_log(UCI_DBG_VERBOSE, + "No descriptors available, did we poll, chan %d?\n", + chan); + mutex_unlock(&uci_handle->out_chan_lock); + if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) + return -EAGAIN; + ret_val = wait_event_interruptible(uci_handle->write_wq, + !mhi_dev_channel_isempty( + uci_handle->out_handle)); + + mutex_lock(&uci_handle->out_chan_lock); + if (-ERESTARTSYS == ret_val) { + uci_log(UCI_DBG_WARNING, + "Waitqueue cancelled by system\n"); + break; + } + } + } + mutex_unlock(&uci_handle->out_chan_lock); + return ret_val; +} + +void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason) +{ + struct uci_ctrl *uci_ctrl_handle = NULL; + + if (reason->reason == MHI_DEV_CTRL_UPDATE) { + uci_ctrl_handle = &uci_ctxt.ctrl_handle; + 
if (!uci_ctrl_handle) { + pr_err("Invalid uci ctrl handle\n"); + return; + } + + uci_log(UCI_DBG_DBG, "received state change update\n"); + wake_up(&uci_ctrl_handle->ctrl_wq); + atomic_set(&uci_ctrl_handle->ctrl_data_update, 1); + } +} +EXPORT_SYMBOL(uci_ctrl_update); + +static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason) +{ + int client_index = 0; + struct uci_client *uci_handle = NULL; + + if (reason->reason == MHI_DEV_TRE_AVAILABLE) { + client_index = reason->ch_id / 2; + uci_handle = &uci_ctxt.client_handles[client_index]; + uci_log(UCI_DBG_DBG, + "recived TRE available event for chan %d\n", + uci_handle->in_chan); + + if (reason->ch_id % 2) { + atomic_set(&uci_handle->write_data_ready, 1); + wake_up(&uci_handle->write_wq); + } else { + atomic_set(&uci_handle->read_data_ready, 1); + wake_up(&uci_handle->read_wq); + } + } +} + +static int mhi_register_client(struct uci_client *mhi_client, int index) +{ + init_waitqueue_head(&mhi_client->read_wq); + init_waitqueue_head(&mhi_client->write_wq); + mhi_client->client_index = index; + + mutex_init(&mhi_client->in_chan_lock); + mutex_init(&mhi_client->out_chan_lock); + spin_lock_init(&mhi_client->wr_req_lock); + + uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan); + return 0; +} + +static long mhi_uci_client_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct uci_client *uci_handle = NULL; + int rc = 0; + struct ep_info epinfo; + + if (file == NULL || file->private_data == NULL) + return -EINVAL; + + uci_handle = file->private_data; + + uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n", + cmd, uci_handle->client_index); + + if (cmd == MHI_UCI_EP_LOOKUP) { + uci_log(UCI_DBG_DBG, "EP_LOOKUP for client:%d\n", + uci_handle->client_index); + epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE; + epinfo.ph_ep_info.peripheral_iface_id = MHI_QTI_IFACE_ID; + epinfo.ipa_ep_pair.cons_pipe_num = + ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD); + 
epinfo.ipa_ep_pair.prod_pipe_num = + ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS); + + uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n", + uci_handle->client_index, + epinfo.ph_ep_info.ep_type, + epinfo.ph_ep_info.peripheral_iface_id); + + uci_log(UCI_DBG_DBG, "ipa_cons_idx:%d ipa_prod_idx:%d\n", + epinfo.ipa_ep_pair.cons_pipe_num, + epinfo.ipa_ep_pair.prod_pipe_num); + + rc = copy_to_user((void __user *)arg, &epinfo, + sizeof(epinfo)); + if (rc) + uci_log(UCI_DBG_ERROR, "copying to user space failed"); + } else { + uci_log(UCI_DBG_ERROR, "wrong parameter:%d\n", cmd); + rc = -EINVAL; + } + + return rc; +} + +static const struct file_operations mhi_uci_ctrl_client_fops = { + .open = mhi_uci_ctrl_open, + .read = mhi_uci_ctrl_client_read, + .poll = mhi_uci_ctrl_poll, +}; + +static const struct file_operations mhi_uci_client_fops = { + .read = mhi_uci_client_read, + .write = mhi_uci_client_write, + .open = mhi_uci_client_open, + .release = mhi_uci_client_release, + .poll = mhi_uci_client_poll, + .unlocked_ioctl = mhi_uci_client_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mhi_uci_client_ioctl, +#endif +}; + +static int uci_device_create(struct uci_client *client) +{ + unsigned long r; + int n; + ssize_t dst_size; + unsigned int client_index; + static char device_name[MAX_DEVICE_NAME_SIZE]; + + client_index = CHAN_TO_CLIENT(client->out_chan); + if (uci_ctxt.client_handles[client_index].dev) + return -EEXIST; + + cdev_init(&uci_ctxt.cdev[client_index], &mhi_uci_client_fops); + uci_ctxt.cdev[client_index].owner = THIS_MODULE; + r = cdev_add(&uci_ctxt.cdev[client_index], + uci_ctxt.start_ctrl_nr + client_index, 1); + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to add cdev for client %d, ret 0x%x\n", + client_index, r); + return r; + } + if (!client->in_chan_attr->device_name) { + n = snprintf(device_name, sizeof(device_name), + DEVICE_NAME "_pipe_%d", CLIENT_TO_CHAN(client_index)); + if (n >= sizeof(device_name)) { + uci_log(UCI_DBG_ERROR, "Device name buf 
too short\n"); + r = -E2BIG; + goto error; + } + } else { + dst_size = strscpy(device_name, + client->in_chan_attr->device_name, + sizeof(device_name)); + if (dst_size <= 0) { + uci_log(UCI_DBG_ERROR, "Device name buf too short\n"); + r = dst_size; + goto error; + } + } + + uci_ctxt.client_handles[client_index].dev = + device_create(uci_ctxt.mhi_uci_class, NULL, + uci_ctxt.start_ctrl_nr + client_index, + NULL, device_name); + if (IS_ERR(uci_ctxt.client_handles[client_index].dev)) { + uci_log(UCI_DBG_ERROR, + "Failed to create device for client %d\n", + client_index); + r = -EIO; + goto error; + } + + uci_log(UCI_DBG_INFO, + "Created device with class 0x%pK and ctrl number %d\n", + uci_ctxt.mhi_uci_class, + uci_ctxt.start_ctrl_nr + client_index); + + return 0; + +error: + cdev_del(&uci_ctxt.cdev[client_index]); + return r; +} + +static void mhi_uci_client_cb(struct mhi_dev_client_cb_data *cb_data) +{ + struct uci_client *client = cb_data->user_data; + + uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n", + cb_data->channel, cb_data->ctrl_info); + + if (cb_data->ctrl_info == MHI_STATE_CONNECTED) + uci_device_create(client); +} + +static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt) +{ + u32 i; + u32 index; + struct uci_client *client; + const struct chan_attr *chan_attrib; + + for (i = 0; i < ARRAY_SIZE(uci_chan_attr_table); i += 2) { + chan_attrib = &uci_chan_attr_table[i]; + index = CHAN_TO_CLIENT(chan_attrib->chan_id); + client = &uci_ctxt->client_handles[index]; + client->out_chan_attr = chan_attrib; + client->in_chan_attr = ++chan_attrib; + client->in_chan = index * 2; + client->out_chan = index * 2 + 1; + client->in_buf_list = + kcalloc(chan_attrib->nr_trbs, + sizeof(struct mhi_dev_iov), + GFP_KERNEL); + if (!client->in_buf_list) + return -ENOMEM; + /* Register callback with MHI if requested */ + if (client->out_chan_attr->register_cb) + mhi_register_state_cb(mhi_uci_client_cb, client, + client->out_chan); + } + return 0; +} + 
+int mhi_uci_init(void) +{ + u32 i = 0; + int ret_val = 0; + struct uci_client *mhi_client = NULL; + unsigned long r = 0; + + mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES, + "mhi-uci", 0); + if (mhi_uci_ipc_log == NULL) { + uci_log(UCI_DBG_WARNING, + "Failed to create IPC logging context\n"); + } + uci_ctxt.event_notifier = uci_event_notifier; + + uci_log(UCI_DBG_DBG, "Setting up channel attributes.\n"); + + ret_val = uci_init_client_attributes(&uci_ctxt); + if (ret_val < 0) { + uci_log(UCI_DBG_ERROR, + "Failed to init client attributes\n"); + return -EIO; + } + + uci_log(UCI_DBG_DBG, "Initializing clients\n"); + uci_log(UCI_DBG_INFO, "Registering for MHI events.\n"); + + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) { + mhi_client = &uci_ctxt.client_handles[i]; + if (!mhi_client->in_chan_attr) + continue; + r = mhi_register_client(mhi_client, i); + if (r) { + uci_log(UCI_DBG_CRITICAL, + "Failed to reg client %d ret %d\n", + r, i); + } + } + + init_waitqueue_head(&uci_ctxt.ctrl_handle.ctrl_wq); + uci_log(UCI_DBG_INFO, "Allocating char devices.\n"); + r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr, + 0, MHI_MAX_SOFTWARE_CHANNELS, + DEVICE_NAME); + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to alloc char devs, ret 0x%x\n", r); + goto failed_char_alloc; + } + + r = alloc_chrdev_region(&uci_ctxt.ctrl_nr, 0, 1, DEVICE_NAME); + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to alloc char ctrl devs, 0x%x\n", r); + goto failed_char_alloc; + } + + uci_log(UCI_DBG_INFO, "Creating class\n"); + uci_ctxt.mhi_uci_class = class_create(THIS_MODULE, + DEVICE_NAME); + if (IS_ERR(uci_ctxt.mhi_uci_class)) { + uci_log(UCI_DBG_ERROR, + "Failed to instantiate class, ret 0x%x\n", r); + r = -ENOMEM; + goto failed_class_add; + } + + uci_log(UCI_DBG_INFO, "Setting up device nodes.\n"); + for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) { + mhi_client = &uci_ctxt.client_handles[i]; + if (!mhi_client->in_chan_attr) + continue; + /* + * Delay device 
node creation until the callback for + * this client's channels is called by the MHI driver, + * if one is registered. + */ + if (mhi_client->in_chan_attr->register_cb) + continue; + ret_val = uci_device_create(mhi_client); + if (ret_val) + goto failed_device_create; + } + + /* Control node */ + uci_ctxt.cdev_ctrl = cdev_alloc(); + if (uci_ctxt.cdev_ctrl == NULL) { + pr_err("%s: ctrl cdev alloc failed\n", __func__); + return 0; + } + + cdev_init(uci_ctxt.cdev_ctrl, &mhi_uci_ctrl_client_fops); + uci_ctxt.cdev_ctrl->owner = THIS_MODULE; + r = cdev_add(uci_ctxt.cdev_ctrl, uci_ctxt.ctrl_nr, 1); + if (IS_ERR_VALUE(r)) { + uci_log(UCI_DBG_ERROR, + "Failed to add ctrl cdev %d, ret 0x%x\n", i, r); + kfree(uci_ctxt.cdev_ctrl); + uci_ctxt.cdev_ctrl = NULL; + return 0; + } + + uci_ctxt.dev = + device_create(uci_ctxt.mhi_uci_class, NULL, + uci_ctxt.ctrl_nr, + NULL, DEVICE_NAME "_ctrl"); + if (IS_ERR(uci_ctxt.dev)) { + uci_log(UCI_DBG_ERROR, + "Failed to add ctrl cdev %d\n", i); + cdev_del(uci_ctxt.cdev_ctrl); + kfree(uci_ctxt.cdev_ctrl); + uci_ctxt.cdev_ctrl = NULL; + } + + uci_ctxt.mhi_uci_class->dev_uevent = mhi_state_uevent; + + return 0; + +failed_device_create: + while (--i >= 0) { + cdev_del(&uci_ctxt.cdev[i]); + device_destroy(uci_ctxt.mhi_uci_class, + MKDEV(MAJOR(uci_ctxt.start_ctrl_nr), i * 2)); + }; + class_destroy(uci_ctxt.mhi_uci_class); +failed_class_add: + unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr), + MHI_MAX_SOFTWARE_CHANNELS); +failed_char_alloc: + return r; +} diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c index 0d148b58249b29eade251c96e2759d5aeb930fe6..c196efe58a9f617e0a65f64adba9dfd510b6a7da 100644 --- a/drivers/platform/msm/msm_11ad/msm_11ad.c +++ b/drivers/platform/msm/msm_11ad/msm_11ad.c @@ -55,6 +55,7 @@ struct device; static const char * const gpio_en_name = "qcom,wigig-en"; +static const char * const gpio_dc_name = "qcom,wigig-dc"; static const char * const sleep_clk_en_name = 
"qcom,sleep-clk-en"; struct wigig_pci { @@ -88,6 +89,7 @@ struct msm11ad_ctx { struct list_head list; struct device *dev; /* for platform device */ int gpio_en; /* card enable */ + int gpio_dc; int sleep_clk_en; /* sleep clock enable for low PM management */ /* pci device */ @@ -502,6 +504,9 @@ static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx) if (ctx->gpio_en >= 0) gpio_direction_output(ctx->gpio_en, 0); + if (ctx->gpio_dc >= 0) + gpio_direction_output(ctx->gpio_dc, 0); + if (ctx->sleep_clk_en >= 0) gpio_direction_output(ctx->sleep_clk_en, 0); @@ -532,6 +537,11 @@ static int msm_11ad_turn_device_power_on(struct msm11ad_ctx *ctx) if (ctx->sleep_clk_en >= 0) gpio_direction_output(ctx->sleep_clk_en, 1); + if (ctx->gpio_dc >= 0) { + gpio_direction_output(ctx->gpio_dc, 1); + msleep(WIGIG_ENABLE_DELAY); + } + if (ctx->gpio_en >= 0) { gpio_direction_output(ctx->gpio_en, 1); msleep(WIGIG_ENABLE_DELAY); @@ -1017,6 +1027,7 @@ static int msm_11ad_probe(struct platform_device *pdev) * compatible = "qcom,wil6210"; * qcom,pcie-parent = <&pcie1>; * qcom,wigig-en = <&tlmm 94 0>; (ctx->gpio_en) + * qcom,wigig-dc = <&tlmm 81 0>; (ctx->gpio_dc) * qcom,sleep-clk-en = <&pm8994_gpios 18 0>; (ctx->sleep_clk_en) * qcom,msm-bus,name = "wil6210"; * qcom,msm-bus,num-cases = <2>; @@ -1032,7 +1043,11 @@ static int msm_11ad_probe(struct platform_device *pdev) * qcom,smmu-exist; */ - /* wigig-en is optional property */ + /* wigig-en and wigig-dc are optional properties */ + ctx->gpio_dc = of_get_named_gpio(of_node, gpio_dc_name, 0); + if (ctx->gpio_dc < 0) + dev_warn(ctx->dev, "GPIO <%s> not found, dc GPIO not used\n", + gpio_dc_name); ctx->gpio_en = of_get_named_gpio(of_node, gpio_en_name, 0); if (ctx->gpio_en < 0) dev_warn(ctx->dev, "GPIO <%s> not found, enable GPIO not used\n", @@ -1107,6 +1122,22 @@ static int msm_11ad_probe(struct platform_device *pdev) goto out_vreg_clk; } + if (ctx->gpio_dc >= 0) { + rc = gpio_request(ctx->gpio_dc, gpio_dc_name); + if (rc < 0) { + 
dev_err(ctx->dev, "failed to request GPIO %d <%s>\n", + ctx->gpio_dc, gpio_dc_name); + goto out_req_dc; + } + rc = gpio_direction_output(ctx->gpio_dc, 1); + if (rc < 0) { + dev_err(ctx->dev, "failed to set GPIO %d <%s>\n", + ctx->gpio_dc, gpio_dc_name); + goto out_set_dc; + } + msleep(WIGIG_ENABLE_DELAY); + } + if (ctx->gpio_en >= 0) { rc = gpio_request(ctx->gpio_en, gpio_en_name); if (rc < 0) { @@ -1193,12 +1224,13 @@ static int msm_11ad_probe(struct platform_device *pdev) /* report */ dev_info(ctx->dev, "msm_11ad discovered. %pK {\n" " gpio_en = %d\n" + " gpio_dc = %d\n" " sleep_clk_en = %d\n" " rc_index = %d\n" " use_smmu = %d\n" " pcidev = %pK\n" - "}\n", ctx, ctx->gpio_en, ctx->sleep_clk_en, ctx->rc_index, - ctx->use_smmu, ctx->pcidev); + "}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en, + ctx->rc_index, ctx->use_smmu, ctx->pcidev); platform_set_drvdata(pdev, ctx); device_disable_async_suspend(&pcidev->dev); @@ -1218,6 +1250,13 @@ static int msm_11ad_probe(struct platform_device *pdev) gpio_free(ctx->gpio_en); out_req: ctx->gpio_en = -EINVAL; + if (ctx->gpio_dc >= 0) + gpio_direction_output(ctx->gpio_dc, 0); +out_set_dc: + if (ctx->gpio_dc >= 0) + gpio_free(ctx->gpio_dc); +out_req_dc: + ctx->gpio_dc = -EINVAL; out_vreg_clk: msm_11ad_disable_clocks(ctx); msm_11ad_release_clocks(ctx); @@ -1242,6 +1281,10 @@ static int msm_11ad_remove(struct platform_device *pdev) gpio_direction_output(ctx->gpio_en, 0); gpio_free(ctx->gpio_en); } + if (ctx->gpio_dc >= 0) { + gpio_direction_output(ctx->gpio_dc, 0); + gpio_free(ctx->gpio_dc); + } if (ctx->sleep_clk_en >= 0) gpio_free(ctx->sleep_clk_en); diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index 89d0a0d246827c37e705b823040b9a78539ce69d..9e9ebbbf29aa311318ee0289e92b817053ff1682 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -309,7 +309,9 @@ static int geni_se_select_fifo_mode(void __iomem *base) static int 
geni_se_select_dma_mode(void __iomem *base) { + int proto = get_se_proto(base); unsigned int geni_dma_mode = 0; + unsigned int common_geni_m_irq_en; geni_write_reg(0, base, SE_GSI_EVENT_EN); geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR); @@ -318,6 +320,12 @@ static int geni_se_select_dma_mode(void __iomem *base) geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR); geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN); + common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN); + if (proto != UART) + common_geni_m_irq_en &= + ~(M_TX_FIFO_WATERMARK_EN | M_RX_FIFO_WATERMARK_EN); + + geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN); geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN); geni_dma_mode |= GENI_DMA_MODE_EN; geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN); diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 8f701b26d347ad5a8cf9ab4e628f7775f79101f2..b7b891a5da6c36ef6117421c58a6ee119290a38d 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -124,6 +124,7 @@ void power_supply_changed(struct power_supply *psy) } EXPORT_SYMBOL_GPL(power_supply_changed); +static int psy_register_cooler(struct device *dev, struct power_supply *psy); /* * Notify that power supply was registered after parent finished the probing. * @@ -131,6 +132,8 @@ EXPORT_SYMBOL_GPL(power_supply_changed); * calling power_supply_changed() directly from power_supply_register() * would lead to execution of get_property() function provided by the driver * too early - before the probe ends. + * Also, registering cooling device from the probe will execute the + * get_property() function. So register the cooling device after the probe. * * Avoid that by waiting on parent's mutex. 
*/ @@ -142,6 +145,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) if (psy->dev.parent) mutex_lock(&psy->dev.parent->mutex); + psy_register_cooler(psy->dev.parent, psy); power_supply_changed(psy); if (psy->dev.parent) @@ -903,10 +907,6 @@ __power_supply_register(struct device *parent, if (rc) goto register_thermal_failed; - rc = psy_register_cooler(parent, psy); - if (rc) - goto register_cooler_failed; - rc = power_supply_create_triggers(psy); if (rc) goto create_triggers_failed; @@ -930,8 +930,6 @@ __power_supply_register(struct device *parent, return psy; create_triggers_failed: - psy_unregister_cooler(psy); -register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: device_del(dev); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 2491252fdbfcf4b107b2fc46b93406a88eb3587f..4d8aca7b81aa857a29cabc5636a457a9c9dbd068 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -381,8 +381,11 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(esr_actual), POWER_SUPPLY_ATTR(esr_nominal), POWER_SUPPLY_ATTR(soh), + POWER_SUPPLY_ATTR(clear_soh), POWER_SUPPLY_ATTR(force_recharge), POWER_SUPPLY_ATTR(fcc_stepper_enable), + POWER_SUPPLY_ATTR(toggle_stat), + POWER_SUPPLY_ATTR(main_fcc_max), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 66044ac593b71f292e1596e685183be828277b4c..a32b1f60421ce018030a6b67b22afc6d65c96909 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -92,6 +92,7 @@ struct pl_data { struct notifier_block nb; bool pl_disable; int taper_entry_fv; + int main_fcc_max; }; struct pl_data *the_chip; @@ -468,10 +469,16 @@ static void get_fcc_split(struct pl_data *chip, int total_ua, * through 
main charger's BATFET, keep the main charger's FCC * to the votable result. */ - if (chip->pl_batfet_mode == POWER_SUPPLY_PL_STACKED_BATFET) + if (chip->pl_batfet_mode == POWER_SUPPLY_PL_STACKED_BATFET) { *master_ua = max(0, total_ua); - else + if (chip->main_fcc_max) + *master_ua = min(*master_ua, + chip->main_fcc_max + *slave_ua); + } else { *master_ua = max(0, total_ua - *slave_ua); + if (chip->main_fcc_max) + *master_ua = min(*master_ua, chip->main_fcc_max); + } } static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua, @@ -1003,6 +1010,16 @@ static int pl_disable_vote_callback(struct votable *votable, chip->fcc_stepper_enable = pval.intval; pr_debug("FCC Stepper %s\n", pval.intval ? "enabled" : "disabled"); + rc = power_supply_get_property(chip->main_psy, + POWER_SUPPLY_PROP_MAIN_FCC_MAX, &pval); + if (rc < 0) { + pl_dbg(chip, PR_PARALLEL, + "Couldn't read primary charger FCC upper limit, rc=%d\n", + rc); + } else if (pval.intval > 0) { + chip->main_fcc_max = pval.intval; + } + if (chip->fcc_stepper_enable) { cancel_delayed_work_sync(&chip->fcc_stepper_work); vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0); @@ -1148,6 +1165,10 @@ static int pl_disable_vote_callback(struct votable *votable, (master_fcc_ua * 100) / total_fcc_ua, (slave_fcc_ua * 100) / total_fcc_ua); } else { + if (chip->main_fcc_max) + total_fcc_ua = min(total_fcc_ua, + chip->main_fcc_max); + if (!chip->fcc_stepper_enable) { if (IS_USBIN(chip->pl_mode)) split_settled(chip); diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 00d49ffa193a38e94c08be878b68814ebc10067b..18f49fded5f8c17f8272d7495f9e0df3599903be 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -77,6 +77,7 @@ #define ESR_FCC_VOTER "fg_esr_fcc" #define FG_PARALLEL_EN_VOTER "fg_parallel_en" +#define MEM_ATTN_IRQ_VOTER "fg_mem_attn_irq" #define BUCKET_COUNT 8 #define BUCKET_SOC_PCT (256 / BUCKET_COUNT) diff --git 
a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h index 575b77aa3d01c7b212549ff01f035fa765b75bbf..7063a4396cf8e35682b01bafdf6963190209aa8c 100644 --- a/drivers/power/supply/qcom/qg-core.h +++ b/drivers/power/supply/qcom/qg-core.h @@ -50,11 +50,13 @@ struct qg_dt { int delta_soc; int rbat_conn_mohm; int ignore_shutdown_soc_secs; + int shutdown_temp_diff; int cold_temp_threshold; int esr_qual_i_ua; int esr_qual_v_uv; int esr_disable_soc; int esr_min_ibat_ua; + int shutdown_soc_threshold; bool hold_soc_while_full; bool linearize_soc; bool cl_disable; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 7eda9f50ebe494b158f2857e2b97712aa01c188e..996248b81109196d123a7ce6af441b8e340f6c32 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -223,9 +223,11 @@ struct fg_gen4_chip { struct votable *pl_disable_votable; struct votable *cp_disable_votable; struct votable *parallel_current_en_votable; + struct votable *mem_attn_irq_en_votable; struct work_struct esr_calib_work; struct alarm esr_fast_cal_timer; struct delayed_work pl_enable_work; + struct delayed_work pl_current_en_work; struct completion mem_attn; char batt_profile[PROFILE_LEN]; enum slope_limit_status slope_limit_sts; @@ -235,6 +237,7 @@ struct fg_gen4_chip { int esr_actual; int esr_nominal; int soh; + bool first_profile_load; bool ki_coeff_dischg_en; bool slope_limit_en; bool esr_fast_calib; @@ -1579,12 +1582,24 @@ static void clear_battery_profile(struct fg_dev *fg) #define BOOTLOADER_LOAD_BIT BIT(1) #define BOOTLOADER_RESTART_BIT BIT(2) #define HLOS_RESTART_BIT BIT(3) +#define FIRST_PROFILE_LOAD_BIT BIT(4) static bool is_profile_load_required(struct fg_gen4_chip *chip) { struct fg_dev *fg = &chip->fg; u8 buf[PROFILE_COMP_LEN], val; - bool profiles_same = false; - int rc; + bool profiles_same = false, valid_integrity = false; + int rc, i; + u8 white_list_values[] = { + 
HLOS_RESTART_BIT, + BOOTLOADER_LOAD_BIT, + BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT, + BOOTLOADER_RESTART_BIT | HLOS_RESTART_BIT, + BOOTLOADER_LOAD_BIT | FIRST_PROFILE_LOAD_BIT, + BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT | + FIRST_PROFILE_LOAD_BIT, + HLOS_RESTART_BIT | BOOTLOADER_RESTART_BIT | + FIRST_PROFILE_LOAD_BIT, + }; rc = fg_sram_read(fg, PROFILE_INTEGRITY_WORD, PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT); @@ -1599,8 +1614,14 @@ static bool is_profile_load_required(struct fg_gen4_chip *chip) /* Whitelist the values */ val &= ~PROFILE_LOAD_BIT; - if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT && - val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) { + for (i = 0; i < ARRAY_SIZE(white_list_values); i++) { + if (val == white_list_values[i]) { + valid_integrity = true; + break; + } + } + + if (!valid_integrity) { val |= PROFILE_LOAD_BIT; pr_warn("Garbage value in profile integrity word: 0x%x\n", val); @@ -1739,6 +1760,13 @@ static void profile_load_work(struct work_struct *work) pr_err("Error in writing to ACT_BATT_CAP rc=%d\n", rc); } done: + rc = fg_sram_read(fg, PROFILE_INTEGRITY_WORD, + PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT); + if (!rc && (val & FIRST_PROFILE_LOAD_BIT)) { + fg_dbg(fg, FG_STATUS, "First profile load bit is set\n"); + chip->first_profile_load = true; + } + rc = fg_gen4_bp_params_config(fg); if (rc < 0) pr_err("Error in configuring battery profile params, rc:%d\n", @@ -2133,16 +2161,6 @@ static int fg_gen4_charge_full_update(struct fg_dev *fg) return rc; } -static void fg_gen4_parallel_current_config(struct fg_gen4_chip *chip) -{ - struct fg_dev *fg = &chip->fg; - bool input_present = is_input_present(fg), en; - - en = fg->charge_done ? 
false : input_present; - - vote(chip->parallel_current_en_votable, FG_PARALLEL_EN_VOTER, en, 0); -} - static int fg_gen4_esr_fcc_config(struct fg_gen4_chip *chip) { struct fg_dev *fg = &chip->fg; @@ -2770,6 +2788,7 @@ static struct fg_irq_info fg_irqs[FG_GEN4_IRQ_MAX] = { [MEM_ATTN_IRQ] = { .name = "mem-attn", .handler = fg_mem_attn_irq_handler, + .wakeable = true, }, [DMA_GRANT_IRQ] = { .name = "dma-grant", @@ -2890,9 +2909,8 @@ static void esr_calib_work(struct work_struct *work) * to disable the interrupt OR ESR fast calibration timer is expired * OR after one retry, disable ESR fast calibration. */ - if ((chip->delta_esr_count >= chip->dt.delta_esr_disable_count) || - chip->esr_fast_cal_timer_expired || - (chip->esr_fast_calib_retry && chip->delta_esr_count > 0)) { + if (chip->delta_esr_count >= chip->dt.delta_esr_disable_count || + chip->esr_fast_cal_timer_expired) { rc = fg_gen4_esr_fast_calib_config(chip, false); if (rc < 0) pr_err("Error in configuring esr_fast_calib, rc=%d\n", @@ -2972,6 +2990,36 @@ static void esr_calib_work(struct work_struct *work) vote(fg->awake_votable, ESR_CALIB, false, 0); } +static void pl_current_en_work(struct work_struct *work) +{ + struct fg_gen4_chip *chip = container_of(work, + struct fg_gen4_chip, + pl_current_en_work.work); + struct fg_dev *fg = &chip->fg; + bool input_present = is_input_present(fg), en; + + en = fg->charge_done ? false : input_present; + + /* + * If mem_attn_irq is disabled and parallel summing current + * configuration needs to be modified, then enable mem_attn_irq and + * wait for 1 second before doing it. 
+ */ + if (get_effective_result(chip->parallel_current_en_votable) != en && + !get_effective_result(chip->mem_attn_irq_en_votable)) { + vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, + true, 0); + schedule_delayed_work(&chip->pl_current_en_work, + msecs_to_jiffies(1000)); + return; + } + + if (!get_effective_result(chip->mem_attn_irq_en_votable)) + return; + + vote(chip->parallel_current_en_votable, FG_PARALLEL_EN_VOTER, en, 0); +} + static void pl_enable_work(struct work_struct *work) { struct fg_gen4_chip *chip = container_of(work, @@ -3063,7 +3111,7 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc); - fg_gen4_parallel_current_config(chip); + schedule_delayed_work(&chip->pl_current_en_work, 0); ttf_update(chip->ttf, input_present); fg->prev_charge_status = fg->charge_status; @@ -3222,6 +3270,9 @@ static int fg_esr_fast_cal_sysfs(const char *val, const struct kernel_param *kp) if (!chip) return -ENODEV; + if (fg_esr_fast_cal_en) + chip->delta_esr_count = 0; + rc = fg_gen4_esr_fast_calib_config(chip, fg_esr_fast_cal_en); if (rc < 0) return rc; @@ -3317,6 +3368,9 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SOC_REPORTING_READY: pval->intval = fg->soc_reporting_ready; break; + case POWER_SUPPLY_PROP_CLEAR_SOH: + pval->intval = chip->first_profile_load; + break; case POWER_SUPPLY_PROP_SOH: pval->intval = chip->soh; break; @@ -3363,7 +3417,9 @@ static int fg_psy_set_property(struct power_supply *psy, const union power_supply_propval *pval) { struct fg_gen4_chip *chip = power_supply_get_drvdata(psy); + struct fg_dev *fg = &chip->fg; int rc = 0; + u8 val, mask; switch (psp) { case POWER_SUPPLY_PROP_CHARGE_FULL: @@ -3410,6 +3466,21 @@ static int fg_psy_set_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SOH: chip->soh = pval->intval; break; + case POWER_SUPPLY_PROP_CLEAR_SOH: + if (chip->first_profile_load && !pval->intval) { + fg_dbg(fg, 
FG_STATUS, "Clearing first profile load bit\n"); + val = 0; + mask = FIRST_PROFILE_LOAD_BIT; + rc = fg_sram_masked_write(fg, PROFILE_INTEGRITY_WORD, + PROFILE_INTEGRITY_OFFSET, mask, val, + FG_IMA_DEFAULT); + if (rc < 0) + pr_err("Error in writing to profile integrity word rc=%d\n", + rc); + else + chip->first_profile_load = false; + } + break; default: break; } @@ -3427,6 +3498,7 @@ static int fg_property_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_ESR_ACTUAL: case POWER_SUPPLY_PROP_ESR_NOMINAL: case POWER_SUPPLY_PROP_SOH: + case POWER_SUPPLY_PROP_CLEAR_SOH: return 1; default: break; @@ -3455,6 +3527,7 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW, POWER_SUPPLY_PROP_CYCLE_COUNTS, POWER_SUPPLY_PROP_SOC_REPORTING_READY, + POWER_SUPPLY_PROP_CLEAR_SOH, POWER_SUPPLY_PROP_SOH, POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, @@ -3583,6 +3656,8 @@ static int fg_parallel_current_en_cb(struct votable *votable, void *data, BATT_INFO_FG_CNV_CHAR_CFG(fg), rc); fg_dbg(fg, FG_STATUS, "Parallel current summing: %d\n", enable); + + vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0); return rc; } @@ -3623,6 +3698,27 @@ static int fg_gen4_delta_esr_irq_en_cb(struct votable *votable, void *data, return 0; } + +static int fg_gen4_mem_attn_irq_en_cb(struct votable *votable, void *data, + int enable, const char *client) +{ + struct fg_dev *fg = data; + + if (!fg->irqs[MEM_ATTN_IRQ].irq) + return 0; + + if (enable) { + enable_irq(fg->irqs[MEM_ATTN_IRQ].irq); + enable_irq_wake(fg->irqs[MEM_ATTN_IRQ].irq); + } else { + disable_irq_wake(fg->irqs[MEM_ATTN_IRQ].irq); + disable_irq_nosync(fg->irqs[MEM_ATTN_IRQ].irq); + } + + fg_dbg(fg, FG_STATUS, "%sabled mem_attn irq\n", enable ? 
"en" : "dis"); + return 0; +} + /* All init functions below this */ static int fg_alg_init(struct fg_gen4_chip *chip) @@ -4486,6 +4582,7 @@ static void fg_gen4_cleanup(struct fg_gen4_chip *chip) cancel_work(&fg->status_change_work); cancel_delayed_work_sync(&fg->profile_load_work); cancel_delayed_work_sync(&fg->sram_dump_work); + cancel_delayed_work_sync(&chip->pl_current_en_work); power_supply_unreg_notifier(&fg->nb); debugfs_remove_recursive(fg->dfs_root); @@ -4502,6 +4599,9 @@ static void fg_gen4_cleanup(struct fg_gen4_chip *chip) if (chip->parallel_current_en_votable) destroy_votable(chip->parallel_current_en_votable); + if (chip->mem_attn_irq_en_votable) + destroy_votable(chip->mem_attn_irq_en_votable); + dev_set_drvdata(fg->dev, NULL); } @@ -4543,6 +4643,7 @@ static int fg_gen4_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&fg->profile_load_work, profile_load_work); INIT_DELAYED_WORK(&fg->sram_dump_work, sram_dump_work); INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work); + INIT_DELAYED_WORK(&chip->pl_current_en_work, pl_current_en_work); fg->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb, fg); @@ -4571,6 +4672,15 @@ static int fg_gen4_probe(struct platform_device *pdev) goto exit; } + chip->mem_attn_irq_en_votable = create_votable("FG_MEM_ATTN_IRQ", + VOTE_SET_ANY, + fg_gen4_mem_attn_irq_en_cb, fg); + if (IS_ERR(chip->mem_attn_irq_en_votable)) { + rc = PTR_ERR(chip->mem_attn_irq_en_votable); + chip->mem_attn_irq_en_votable = NULL; + goto exit; + } + chip->parallel_current_en_votable = create_votable("FG_SMB_MEAS_EN", VOTE_SET_ANY, fg_parallel_current_en_cb, fg); @@ -4654,6 +4764,9 @@ static int fg_gen4_probe(struct platform_device *pdev) /* Keep BSOC_DELTA_IRQ disabled until we require it */ vote(fg->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0); + /* Keep MEM_ATTN_IRQ disabled until we require it */ + vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0); + rc = fg_debugfs_create(fg); if (rc < 0) { 
dev_err(fg->dev, "Error in creating debugfs entries, rc:%d\n", @@ -4741,9 +4854,6 @@ static int fg_gen4_suspend(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; - if (fg->irqs[MEM_ATTN_IRQ].irq) - disable_irq_nosync(fg->irqs[MEM_ATTN_IRQ].irq); - cancel_delayed_work_sync(&chip->ttf->ttf_work); if (fg_sram_dump) cancel_delayed_work_sync(&fg->sram_dump_work); @@ -4755,9 +4865,6 @@ static int fg_gen4_resume(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; - if (fg->irqs[MEM_ATTN_IRQ].irq) - enable_irq(fg->irqs[MEM_ATTN_IRQ].irq); - schedule_delayed_work(&chip->ttf->ttf_work, 0); if (fg_sram_dump) schedule_delayed_work(&fg->sram_dump_work, diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c index d0e9d05e76b585d7447bffa41127e2a2c14e8ef6..16d78a4ae1ec5b93bebd045a1a215da7ddbc74d9 100644 --- a/drivers/power/supply/qcom/qpnp-qg.c +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -2515,7 +2515,6 @@ static int qg_setup_battery(struct qpnp_qg *chip) return 0; } - static struct ocv_all ocv[] = { [S7_PON_OCV] = { 0, 0, "S7_PON_OCV"}, [S3_GOOD_OCV] = { 0, 0, "S3_GOOD_OCV"}, @@ -2529,7 +2528,7 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip) int rc = 0, batt_temp = 0, i; bool use_pon_ocv = true; unsigned long rtc_sec = 0; - u32 ocv_uv = 0, soc = 0, shutdown[SDAM_MAX] = {0}; + u32 ocv_uv = 0, soc = 0, pon_soc = 0, shutdown[SDAM_MAX] = {0}; char ocv_type[20] = "NONE"; if (!chip->profile_loaded) { @@ -2537,6 +2536,24 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip) return 0; } + /* read all OCVs */ + for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) { + rc = qg_read_ocv(chip, &ocv[i].ocv_uv, + &ocv[i].ocv_raw, i); + if (rc < 0) + pr_err("Failed to read %s OCV rc=%d\n", + ocv[i].ocv_type, rc); + else + qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n", + ocv[i].ocv_type, ocv[i].ocv_uv); + } + + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc) { + 
pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc); + goto done; + } + rc = get_rtc_time(&rtc_sec); if (rc < 0) { pr_err("Failed to read RTC time rc=%d\n", rc); @@ -2549,47 +2566,50 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip) goto use_pon_ocv; } - qg_dbg(chip, QG_DEBUG_PON, "Shutdown: Valid=%d SOC=%d OCV=%duV time=%dsecs, time_now=%ldsecs\n", + rc = lookup_soc_ocv(&pon_soc, ocv[S7_PON_OCV].ocv_uv, batt_temp, false); + if (rc < 0) { + pr_err("Failed to lookup S7_PON SOC rc=%d\n", rc); + goto done; + } + + qg_dbg(chip, QG_DEBUG_PON, "Shutdown: Valid=%d SOC=%d OCV=%duV time=%dsecs temp=%d, time_now=%ldsecs temp_now=%d S7_soc=%d\n", shutdown[SDAM_VALID], shutdown[SDAM_SOC], shutdown[SDAM_OCV_UV], shutdown[SDAM_TIME_SEC], - rtc_sec); + shutdown[SDAM_TEMP], + rtc_sec, batt_temp, + pon_soc); /* * Use the shutdown SOC if - * 1. The device was powered off for < ignore_shutdown_time - * 2. SDAM read is a success & SDAM data is valid + * 1. SDAM read is a success & SDAM data is valid + * 2. The device was powered off for < ignore_shutdown_time + * 2. 
Batt temp has not changed more than shutdown_temp_diff */ - if (shutdown[SDAM_VALID] && is_between(0, - chip->dt.ignore_shutdown_soc_secs, - (rtc_sec - shutdown[SDAM_TIME_SEC]))) { - use_pon_ocv = false; - ocv_uv = shutdown[SDAM_OCV_UV]; - soc = shutdown[SDAM_SOC]; - strlcpy(ocv_type, "SHUTDOWN_SOC", 20); - qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n"); - } + if (!shutdown[SDAM_VALID]) + goto use_pon_ocv; -use_pon_ocv: - if (use_pon_ocv == true) { - rc = qg_get_battery_temp(chip, &batt_temp); - if (rc < 0) { - pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc); - goto done; - } + if (!is_between(0, chip->dt.ignore_shutdown_soc_secs, + (rtc_sec - shutdown[SDAM_TIME_SEC]))) + goto use_pon_ocv; - /* read all OCVs */ - for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) { - rc = qg_read_ocv(chip, &ocv[i].ocv_uv, - &ocv[i].ocv_raw, i); - if (rc < 0) - pr_err("Failed to read %s OCV rc=%d\n", - ocv[i].ocv_type, rc); - else - qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n", - ocv[i].ocv_type, ocv[i].ocv_uv); - } + if (!is_between(0, chip->dt.shutdown_temp_diff, + abs(shutdown[SDAM_TEMP] - batt_temp))) + goto use_pon_ocv; + + if ((chip->dt.shutdown_soc_threshold != -EINVAL) && + !is_between(0, chip->dt.shutdown_soc_threshold, + abs(pon_soc - shutdown[SDAM_SOC]))) + goto use_pon_ocv; + + use_pon_ocv = false; + ocv_uv = shutdown[SDAM_OCV_UV]; + soc = shutdown[SDAM_SOC]; + strlcpy(ocv_type, "SHUTDOWN_SOC", 20); + qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n"); +use_pon_ocv: + if (use_pon_ocv == true) { if (ocv[S3_LAST_OCV].ocv_raw == FIFO_V_RESET_VAL) { if (!ocv[SDAM_PON_OCV].ocv_uv) { strlcpy(ocv_type, "S7_PON_SOC", 20); @@ -3087,6 +3107,7 @@ static int qg_alg_init(struct qpnp_qg *chip) #define DEFAULT_CL_MAX_DEC_DECIPERC 20 #define DEFAULT_CL_MIN_LIM_DECIPERC 500 #define DEFAULT_CL_MAX_LIM_DECIPERC 100 +#define DEFAULT_SHUTDOWN_TEMP_DIFF 60 /* 6 degC */ #define DEFAULT_ESR_QUAL_CURRENT_UA 130000 #define DEFAULT_ESR_QUAL_VBAT_UV 7000 #define DEFAULT_ESR_DISABLE_SOC 
1000 @@ -3272,6 +3293,12 @@ static int qg_parse_dt(struct qpnp_qg *chip) else chip->dt.ignore_shutdown_soc_secs = temp; + rc = of_property_read_u32(node, "qcom,shutdown-temp-diff", &temp); + if (rc < 0) + chip->dt.shutdown_temp_diff = DEFAULT_SHUTDOWN_TEMP_DIFF; + else + chip->dt.shutdown_temp_diff = temp; + chip->dt.hold_soc_while_full = of_property_read_bool(node, "qcom,hold-soc-while-full"); @@ -3315,6 +3342,12 @@ static int qg_parse_dt(struct qpnp_qg *chip) else chip->dt.esr_min_ibat_ua = (int)temp; + rc = of_property_read_u32(node, "qcom,shutdown_soc_threshold", &temp); + if (rc < 0) + chip->dt.shutdown_soc_threshold = -EINVAL; + else + chip->dt.shutdown_soc_threshold = temp; + chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns"); /* Capacity learning params*/ diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index aa491b16c7599c22860155c58e967cc12280e8e9..d0591a479664d806fd4b625da2bfdd30d8d14ab9 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -220,6 +220,7 @@ module_param_named( ); #define PMI632_MAX_ICL_UA 3000000 +#define PM6150_MAX_FCC_UA 3000000 static int smb5_chg_config_init(struct smb5 *chip) { struct smb_charger *chg = &chip->chg; @@ -255,6 +256,8 @@ static int smb5_chg_config_init(struct smb5 *chip) chip->chg.smb_version = PM6150_SUBTYPE; chg->param = smb5_pm8150b_params; chg->name = "pm6150_charger"; + chg->wa_flags |= SW_THERM_REGULATION_WA; + chg->main_fcc_max = PM6150_MAX_FCC_UA; break; case PMI632_SUBTYPE: chip->chg.smb_version = PMI632_SUBTYPE; @@ -291,11 +294,7 @@ static int smb5_chg_config_init(struct smb5 *chip) #define MICRO_P1A 100000 #define MICRO_1PA 1000000 #define OTG_DEFAULT_DEGLITCH_TIME_MS 50 -#define MIN_WD_BARK_TIME 16 #define DEFAULT_WD_BARK_TIME 64 -#define BITE_WDOG_TIMEOUT_8S 0x3 -#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2) -#define BARK_WDOG_TIMEOUT_SHIFT 2 static int smb5_parse_dt(struct smb5 *chip) { struct 
smb_charger *chg = &chip->chg; @@ -443,79 +442,45 @@ static int smb5_parse_dt(struct smb5 *chip) chg->fcc_stepper_enable = of_property_read_bool(node, "qcom,fcc-stepping-enable"); - rc = of_property_match_string(node, "io-channel-names", - "usb_in_voltage"); - if (rc >= 0) { - chg->iio.usbin_v_chan = iio_channel_get(chg->dev, - "usb_in_voltage"); - if (IS_ERR(chg->iio.usbin_v_chan)) { - rc = PTR_ERR(chg->iio.usbin_v_chan); - if (rc != -EPROBE_DEFER) - dev_err(chg->dev, "USBIN_V channel unavailable, %ld\n", - rc); - chg->iio.usbin_v_chan = NULL; - return rc; - } - } + /* Extract ADC channels */ + rc = smblib_get_iio_channel(chg, "usb_in_voltage", + &chg->iio.usbin_v_chan); + if (rc < 0) + return rc; - rc = of_property_match_string(node, "io-channel-names", - "chg_temp"); - if (rc >= 0) { - chg->iio.temp_chan = iio_channel_get(chg->dev, "chg_temp"); - if (IS_ERR(chg->iio.temp_chan)) { - rc = PTR_ERR(chg->iio.temp_chan); - if (rc != -EPROBE_DEFER) - dev_err(chg->dev, "CHG_TEMP channel unavailable, %ld\n", - rc); - chg->iio.temp_chan = NULL; - return rc; - } - } + rc = smblib_get_iio_channel(chg, "chg_temp", &chg->iio.temp_chan); + if (rc < 0) + return rc; - rc = of_property_match_string(node, "io-channel-names", - "usb_in_current"); - if (rc >= 0) { - chg->iio.usbin_i_chan = iio_channel_get(chg->dev, - "usb_in_current"); - if (IS_ERR(chg->iio.usbin_i_chan)) { - rc = PTR_ERR(chg->iio.usbin_i_chan); - if (rc != -EPROBE_DEFER) - dev_err(chg->dev, "USBIN_I channel unavailable, %ld\n", - rc); - chg->iio.usbin_i_chan = NULL; - return rc; - } - } + rc = smblib_get_iio_channel(chg, "usb_in_current", + &chg->iio.usbin_i_chan); + if (rc < 0) + return rc; - rc = of_property_match_string(node, "io-channel-names", - "sbux_res"); - if (rc >= 0) { - chg->iio.sbux_chan = iio_channel_get(chg->dev, - "sbux_res"); - if (IS_ERR(chg->iio.sbux_chan)) { - rc = PTR_ERR(chg->iio.sbux_chan); - if (rc != -EPROBE_DEFER) - dev_err(chg->dev, "USBIN_V channel unavailable, %ld\n", - rc); - 
chg->iio.sbux_chan = NULL; - return rc; - } - } + rc = smblib_get_iio_channel(chg, "sbux_res", &chg->iio.sbux_chan); + if (rc < 0) + return rc; - rc = of_property_match_string(node, "io-channel-names", - "vph_voltage"); - if (rc >= 0) { - chg->iio.vph_v_chan = iio_channel_get(chg->dev, - "vph_voltage"); - if (IS_ERR(chg->iio.vph_v_chan)) { - rc = PTR_ERR(chg->iio.vph_v_chan); - if (rc != -EPROBE_DEFER) - dev_err(chg->dev, "vph_voltage channel unavailable, %ld\n", - rc); - chg->iio.vph_v_chan = NULL; - return rc; - } - } + rc = smblib_get_iio_channel(chg, "vph_voltage", &chg->iio.vph_v_chan); + if (rc < 0) + return rc; + + rc = smblib_get_iio_channel(chg, "die_temp", &chg->iio.die_temp_chan); + if (rc < 0) + return rc; + + rc = smblib_get_iio_channel(chg, "conn_temp", + &chg->iio.connector_temp_chan); + if (rc < 0) + return rc; + + rc = smblib_get_iio_channel(chg, "skin_temp", &chg->iio.skin_temp_chan); + if (rc < 0) + return rc; + + rc = smblib_get_iio_channel(chg, "smb_temp", &chg->iio.smb_temp_chan); + if (rc < 0) + return rc; return 0; } @@ -687,7 +652,9 @@ static int smb5_usb_get_prop(struct power_supply *psy, : POWER_SUPPLY_SCOPE_UNKNOWN; break; case POWER_SUPPLY_PROP_SMB_EN_MODE: + mutex_lock(&chg->smb_lock); val->intval = chg->sec_chg_selected; + mutex_unlock(&chg->smb_lock); break; case POWER_SUPPLY_PROP_SMB_EN_REASON: val->intval = chg->cp_reason; @@ -919,6 +886,8 @@ static enum power_supply_property smb5_usb_main_props[] = { POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_FLASH_ACTIVE, POWER_SUPPLY_PROP_FLASH_TRIGGER, + POWER_SUPPLY_PROP_TOGGLE_STAT, + POWER_SUPPLY_PROP_MAIN_FCC_MAX, }; static int smb5_usb_main_get_prop(struct power_supply *psy, @@ -958,6 +927,12 @@ static int smb5_usb_main_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_FLASH_TRIGGER: rc = schgm_flash_get_vreg_ok(chg, &val->intval); break; + case POWER_SUPPLY_PROP_TOGGLE_STAT: + val->intval = 0; + break; + case POWER_SUPPLY_PROP_MAIN_FCC_MAX: + val->intval = chg->main_fcc_max; 
+ break; default: pr_debug("get prop %d is not supported in usb-main\n", psp); rc = -EINVAL; @@ -992,6 +967,13 @@ static int smb5_usb_main_set_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_FLASH_ACTIVE: chg->flash_active = val->intval; break; + case POWER_SUPPLY_PROP_TOGGLE_STAT: + rc = smblib_toggle_smb_en(chg, val->intval); + break; + case POWER_SUPPLY_PROP_MAIN_FCC_MAX: + chg->main_fcc_max = val->intval; + rerun_election(chg->fcc_votable); + break; default: pr_err("set prop %d is not supported\n", psp); rc = -EINVAL; @@ -1001,6 +983,24 @@ static int smb5_usb_main_set_prop(struct power_supply *psy, return rc; } +static int smb5_usb_main_prop_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + int rc; + + switch (psp) { + case POWER_SUPPLY_PROP_TOGGLE_STAT: + case POWER_SUPPLY_PROP_MAIN_FCC_MAX: + rc = 1; + break; + default: + rc = 0; + break; + } + + return rc; +} + static const struct power_supply_desc usb_main_psy_desc = { .name = "main", .type = POWER_SUPPLY_TYPE_MAIN, @@ -1008,6 +1008,7 @@ static const struct power_supply_desc usb_main_psy_desc = { .num_properties = ARRAY_SIZE(smb5_usb_main_props), .get_property = smb5_usb_main_get_prop, .set_property = smb5_usb_main_set_prop, + .property_is_writeable = smb5_usb_main_prop_is_writeable, }; static int smb5_init_usb_main_psy(struct smb5 *chip) @@ -1304,6 +1305,7 @@ static int smb5_batt_get_prop(struct power_supply *psy, break; case POWER_SUPPLY_PROP_FORCE_RECHARGE: val->intval = 0; + break; case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE: val->intval = chg->fcc_stepper_enable; break; @@ -1555,20 +1557,18 @@ static int smb5_configure_typec(struct smb_charger *chg) return rc; } - rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_1_REG, - TYPEC_CCOUT_DETACH_INT_EN_BIT | - TYPEC_CCOUT_ATTACH_INT_EN_BIT); + /* Use simple write to clear interrupts */ + rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_1_REG, 0); if (rc < 0) { dev_err(chg->dev, "Couldn't configure Type-C interrupts rc=%d\n", rc); 
return rc; } - rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, + /* Use simple write to enable only required interrupts */ + rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, TYPEC_SRC_BATT_HPWR_INT_EN_BIT | - TYPEC_WATER_DETECTION_INT_EN_BIT, - TYPEC_SRC_BATT_HPWR_INT_EN_BIT - | TYPEC_WATER_DETECTION_INT_EN_BIT); + TYPEC_WATER_DETECTION_INT_EN_BIT); if (rc < 0) { dev_err(chg->dev, "Couldn't configure Type-C interrupts rc=%d\n", rc); @@ -1747,6 +1747,21 @@ static int smb5_init_hw(struct smb5 *chip) return rc; } + /* + * If SW thermal regulation WA is active then all the HW temperature + * comparators need to be disabled to prevent HW thermal regulation, + * apart from DIE_TEMP analog comparator for SHDN regulation. + */ + if (chg->wa_flags & SW_THERM_REGULATION_WA) { + rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG, + THERMREG_DIE_CMP_SRC_EN_BIT); + if (rc < 0) { + dev_err(chg->dev, "Couldn't disable HW thermal regulation rc=%d\n", + rc); + return rc; + } + } + /* Use SW based VBUS control, disable HW autonomous mode */ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, @@ -2147,6 +2162,7 @@ static int smb5_determine_initial_status(struct smb5 *chip) batt_temp_changed_irq_handler(0, &irq_data); wdog_bark_irq_handler(0, &irq_data); typec_or_rid_detection_change_irq_handler(0, &irq_data); + wdog_snarl_irq_handler(0, &irq_data); return 0; } @@ -2344,10 +2360,13 @@ static struct smb_irq_info smb5_irqs[] = { /* MISCELLANEOUS IRQs */ [WDOG_SNARL_IRQ] = { .name = "wdog-snarl", + .handler = wdog_snarl_irq_handler, + .wake = true, }, [WDOG_BARK_IRQ] = { .name = "wdog-bark", .handler = wdog_bark_irq_handler, + .wake = true, }, [AICL_FAIL_IRQ] = { .name = "aicl-fail", @@ -2476,6 +2495,13 @@ static int smb5_request_interrupts(struct smb5 *chip) if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq) chg->usb_icl_change_irq_enabled = true; + /* + * Disable WDOG SNARL IRQ by default to prevent IRQ storm. 
If required + * for any application, enable it through votable. + */ + if (chg->irq_info[WDOG_SNARL_IRQ].irq) + vote(chg->wdog_snarl_irq_en_votable, DEFAULT_VOTER, false, 0); + return rc; } @@ -2634,6 +2660,7 @@ static int smb5_probe(struct platform_device *pdev) chg->die_health = -EINVAL; chg->connector_health = -EINVAL; chg->otg_present = false; + chg->main_fcc_max = -EINVAL; chg->regmap = dev_get_regmap(chg->dev->parent, NULL); if (!chg->regmap) { diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c index df22dec9972b1aaa1783595e011bfa56448f5c1d..3e59a24c47e2654f84aa21f2b21f21d9ab0ffe36 100644 --- a/drivers/power/supply/qcom/smb1390-charger.c +++ b/drivers/power/supply/qcom/smb1390-charger.c @@ -81,13 +81,14 @@ #define ATEST1_SEL_MASK GENMASK(6, 0) #define ISNS_INT_VAL 0x09 -#define CP_VOTER "CP_VOTER" -#define USER_VOTER "USER_VOTER" -#define ILIM_VOTER "ILIM_VOTER" -#define FCC_VOTER "FCC_VOTER" -#define ICL_VOTER "ICL_VOTER" -#define WIRELESS_VOTER "WIRELESS_VOTER" -#define SRC_VOTER "SRC_VOTER" +#define CP_VOTER "CP_VOTER" +#define USER_VOTER "USER_VOTER" +#define ILIM_VOTER "ILIM_VOTER" +#define FCC_VOTER "FCC_VOTER" +#define ICL_VOTER "ICL_VOTER" +#define WIRELESS_VOTER "WIRELESS_VOTER" +#define SRC_VOTER "SRC_VOTER" +#define SWITCHER_TOGGLE_VOTER "SWITCHER_TOGGLE_VOTER" enum { SWITCHER_OFF_WINDOW_IRQ = 0, @@ -110,6 +111,7 @@ struct smb1390 { struct regmap *regmap; struct notifier_block nb; struct class cp_class; + struct wakeup_source *cp_ws; /* work structs */ struct work_struct status_change_work; @@ -123,6 +125,7 @@ struct smb1390 { struct votable *disable_votable; struct votable *ilim_votable; struct votable *fcc_votable; + struct votable *cp_awake_votable; /* power supplies */ struct power_supply *usb_psy; @@ -133,6 +136,7 @@ struct smb1390 { bool status_change_running; bool taper_work_running; struct smb1390_iio iio; + int irq_status; }; struct smb_irq { @@ -205,42 +209,28 @@ static bool 
is_psy_voter_available(struct smb1390 *chip) return true; } -static irqreturn_t default_irq_handler(int irq, void *data) +static void cp_toggle_switcher(struct smb1390 *chip) { - struct smb1390 *chip = data; - int i; + vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, true, 0); - for (i = 0; i < NUM_IRQS; ++i) { - if (irq == chip->irqs[i]) - pr_debug("%s IRQ triggered\n", smb_irqs[i].name); - } + /* Delay for toggling switcher */ + usleep_range(20, 30); - kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); - return IRQ_HANDLED; + vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, false, 0); } -static irqreturn_t irev_irq_handler(int irq, void *data) +static irqreturn_t default_irq_handler(int irq, void *data) { struct smb1390 *chip = data; - int rc; - - pr_debug("IREV IRQ triggered\n"); - - rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, - CMD_EN_SWITCHER_BIT, 0); - if (rc < 0) { - pr_err("Couldn't disable switcher by command mode\n"); - goto out; - } + int i; - rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, - CMD_EN_SWITCHER_BIT, 1); - if (rc < 0) { - pr_err("Couldn't enable switcher by command mode\n"); - goto out; + for (i = 0; i < NUM_IRQS; ++i) { + if (irq == chip->irqs[i]) { + pr_debug("%s IRQ triggered\n", smb_irqs[i].name); + chip->irq_status |= 1 << i; + } } -out: kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); return IRQ_HANDLED; } @@ -263,7 +253,7 @@ static const struct smb_irq smb_irqs[] = { }, [IREV_IRQ] = { .name = "irev-fault", - .handler = irev_irq_handler, + .handler = default_irq_handler, .wake = true, }, [VPH_OV_HARD_IRQ] = { @@ -340,6 +330,40 @@ static ssize_t enable_store(struct class *c, struct class_attribute *attr, } static CLASS_ATTR_RW(enable); +static ssize_t cp_irq_show(struct class *c, struct class_attribute *attr, + char *buf) +{ + struct smb1390 *chip = container_of(c, struct smb1390, cp_class); + int rc, val; + + rc = smb1390_read(chip, CORE_INT_RT_STS_REG, &val); + if (rc < 0) + return -EINVAL; + + val |= chip->irq_status; + 
chip->irq_status = 0; + + return snprintf(buf, PAGE_SIZE, "%x\n", val); +} +static CLASS_ATTR_RO(cp_irq); + +static ssize_t toggle_switcher_store(struct class *c, + struct class_attribute *attr, const char *buf, + size_t count) +{ + struct smb1390 *chip = container_of(c, struct smb1390, cp_class); + unsigned long val; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val) + cp_toggle_switcher(chip); + + return count; +} +static CLASS_ATTR_WO(toggle_switcher); + static ssize_t die_temp_show(struct class *c, struct class_attribute *attr, char *buf) { @@ -405,6 +429,8 @@ static struct attribute *cp_class_attrs[] = { &class_attr_stat1.attr, &class_attr_stat2.attr, &class_attr_enable.attr, + &class_attr_cp_irq.attr, + &class_attr_toggle_switcher.attr, &class_attr_die_temp.attr, &class_attr_isns.attr, NULL, @@ -427,7 +453,9 @@ static int smb1390_disable_vote_cb(struct votable *votable, void *data, if (rc < 0) return rc; + vote(chip->cp_awake_votable, CP_VOTER, false, 0); } else { + vote(chip->cp_awake_votable, CP_VOTER, true, 0); rc = smb1390_masked_write(chip, CORE_CONTROL1_REG, CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT); if (rc < 0) @@ -472,6 +500,20 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data, return rc; } +static int smb1390_awake_vote_cb(struct votable *votable, void *data, + int awake, const char *client) +{ + struct smb1390 *chip = data; + + if (awake) + __pm_stay_awake(chip->cp_ws); + else + __pm_relax(chip->cp_ws); + + pr_debug("client: %s awake: %d\n", client, awake); + return 0; +} + static int smb1390_notifier_cb(struct notifier_block *nb, unsigned long event, void *data) { @@ -658,6 +700,11 @@ static int smb1390_create_votables(struct smb1390 *chip) if (IS_ERR(chip->ilim_votable)) return PTR_ERR(chip->ilim_votable); + chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY, + smb1390_awake_vote_cb, chip); + if (IS_ERR(chip->cp_awake_votable)) + return PTR_ERR(chip->cp_awake_votable); + return 0; } @@ -795,6 +842,10 
@@ static int smb1390_probe(struct platform_device *pdev) goto out_work; } + chip->cp_ws = wakeup_source_register("qcom-chargepump"); + if (!chip->cp_ws) + return rc; + rc = smb1390_create_votables(chip); if (rc < 0) { pr_err("Couldn't create votables rc=%d\n", rc); @@ -842,6 +893,7 @@ static int smb1390_probe(struct platform_device *pdev) out_work: cancel_work(&chip->taper_work); cancel_work(&chip->status_change_work); + wakeup_source_unregister(chip->cp_ws); return rc; } @@ -856,6 +908,7 @@ static int smb1390_remove(struct platform_device *pdev) vote(chip->disable_votable, USER_VOTER, true, 0); cancel_work(&chip->taper_work); cancel_work(&chip->status_change_work); + wakeup_source_unregister(chip->cp_ws); smb1390_destroy_votables(chip); smb1390_release_channels(chip); return 0; diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 943839c7afdba9d58dbdc231d9b74e4fca83af9a..f086b5ea1225538e189b266a2d45be7bd5102aba 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -79,6 +79,52 @@ int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val) return regmap_update_bits(chg->regmap, addr, mask, val); } +int smblib_get_iio_channel(struct smb_charger *chg, const char *propname, + struct iio_channel **chan) +{ + int rc = 0; + + rc = of_property_match_string(chg->dev->of_node, + "io-channel-names", propname); + if (rc < 0) + return 0; + + *chan = iio_channel_get(chg->dev, propname); + if (IS_ERR(*chan)) { + rc = PTR_ERR(*chan); + if (rc != -EPROBE_DEFER) + smblib_err(chg, "%s channel unavailable, %d\n", + propname, rc); + *chan = NULL; + } + + return rc; +} + +#define DIV_FACTOR_MICRO_V_I 1 +#define DIV_FACTOR_MILI_V_I 1000 +#define DIV_FACTOR_DECIDEGC 100 +int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan, + int div, int *data) +{ + int rc = 0; + *data = -ENODATA; + + if (chan) { + rc = iio_read_channel_processed(chan, data); + if (rc < 0) { + 
smblib_err(chg, "Error in reading IIO channel data, rc=%d\n", + rc); + return rc; + } + + if (div != 0) + *data /= div; + } + + return rc; +} + int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua) { int rc, cc_minus_ua; @@ -857,6 +903,7 @@ static void smblib_uusb_removal(struct smb_charger *chg) struct smb_irq_data *data; struct storm_watch *wdata; + mutex_lock(&chg->smb_lock); chg->cp_reason = POWER_SUPPLY_CP_NONE; rc = smblib_select_sec_charger(chg, chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL : @@ -864,6 +911,7 @@ static void smblib_uusb_removal(struct smb_charger *chg) if (rc < 0) dev_err(chg->dev, "Couldn't disable secondary charger rc=%d\n", rc); + mutex_unlock(&chg->smb_lock); cancel_delayed_work_sync(&chg->pl_enable_work); @@ -886,6 +934,14 @@ static void smblib_uusb_removal(struct smb_charger *chg) vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA); vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0); + /* Remove SW thermal regulation WA votes */ + vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0); + vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER, false, 0); + vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER, false, 0); + if (chg->cp_disable_votable) + vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER, + false, 0); + /* reconfigure allowed voltage for HVDCP */ rc = smblib_set_adapter_allowance(chg, USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); @@ -1166,6 +1222,47 @@ int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua) return 0; } +int smblib_toggle_smb_en(struct smb_charger *chg, int toggle) +{ + int rc = 0; + + if (!toggle) + return rc; + + mutex_lock(&chg->smb_lock); + + if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) { + /* Pull down SMB_EN pin */ + rc = smblib_select_sec_charger(chg, + POWER_SUPPLY_CHARGER_SEC_NONE); + if (rc < 0) { + dev_err(chg->dev, "Couldn't disable SMB_EN pin rc=%d\n", + rc); + goto out; + } + + /* + * A minimum of 20us delay is expected before 
switching on STAT + * pin. + */ + usleep_range(20, 30); + + /* Pull up SMB_EN pin and enable Charge Pump under HW control */ + rc = smblib_select_sec_charger(chg, + POWER_SUPPLY_CHARGER_SEC_CP); + if (rc < 0) { + dev_err(chg->dev, "Couldn't enable CP rc=%d\n", + rc); + goto out; + } + } + +out: + mutex_unlock(&chg->smb_lock); + + return rc; +} + /********************* * VOTABLE CALLBACKS * *********************/ @@ -1237,6 +1334,25 @@ static int smblib_usb_irq_enable_vote_callback(struct votable *votable, return 0; } +static int smblib_wdog_snarl_irq_en_vote_callback(struct votable *votable, + void *data, int enable, const char *client) +{ + struct smb_charger *chg = data; + + if (!chg->irq_info[WDOG_SNARL_IRQ].irq) + return 0; + + if (enable) { + enable_irq(chg->irq_info[WDOG_SNARL_IRQ].irq); + enable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq); + } else { + disable_irq_wake(chg->irq_info[WDOG_SNARL_IRQ].irq); + disable_irq_nosync(chg->irq_info[WDOG_SNARL_IRQ].irq); + } + + return 0; +} + /******************* * VCONN REGULATOR * * *****************/ @@ -2058,6 +2174,268 @@ int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable) return 0; } +static int smblib_set_sw_thermal_regulation(struct smb_charger *chg, + bool enable) +{ + int rc = 0; + + if (!(chg->wa_flags & SW_THERM_REGULATION_WA)) + return rc; + + if (enable) { + /* + * Configure min time to quickly address thermal + * condition. + */ + rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG, + SNARL_WDOG_TIMEOUT_MASK, SNARL_WDOG_TMOUT_62P5MS); + if (rc < 0) { + smblib_err(chg, "Couldn't configure snarl wdog tmout, rc=%d\n", + rc); + return rc; + } + + vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER, + true, 0); + /* + * Schedule SW_THERM_REGULATION_WORK directly if USB input + * is suspended due to SW thermal regulation WA since WDOG + * IRQ won't trigger with input suspended. 
+ */ + if (is_client_vote_enabled(chg->usb_icl_votable, + SW_THERM_REGULATION_VOTER)) { + vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, + true, 0); + schedule_delayed_work(&chg->thermal_regulation_work, 0); + } + } else { + vote(chg->wdog_snarl_irq_en_votable, SW_THERM_REGULATION_VOTER, + false, 0); + cancel_delayed_work_sync(&chg->thermal_regulation_work); + vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, false, 0); + } + + smblib_dbg(chg, PR_MISC, "WDOG SNARL INT %s\n", + enable ? "Enabled" : "Disabled"); + + return rc; +} + +static int smblib_update_thermal_readings(struct smb_charger *chg) +{ + union power_supply_propval pval = {0, }; + int rc = 0; + + if (!chg->pl.psy) + chg->pl.psy = power_supply_get_by_name("parallel"); + + rc = smblib_read_iio_channel(chg, chg->iio.die_temp_chan, + DIV_FACTOR_DECIDEGC, &chg->die_temp); + if (rc < 0) { + smblib_err(chg, "Couldn't read DIE TEMP channel, rc=%d\n", rc); + return rc; + } + + rc = smblib_read_iio_channel(chg, chg->iio.connector_temp_chan, + DIV_FACTOR_DECIDEGC, &chg->connector_temp); + if (rc < 0) { + smblib_err(chg, "Couldn't read CONN TEMP channel, rc=%d\n", rc); + return rc; + } + + rc = smblib_read_iio_channel(chg, chg->iio.skin_temp_chan, + DIV_FACTOR_DECIDEGC, &chg->skin_temp); + if (rc < 0) { + smblib_err(chg, "Couldn't read SKIN TEMP channel, rc=%d\n", rc); + return rc; + } + + if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) { + rc = smblib_read_iio_channel(chg, chg->iio.smb_temp_chan, + DIV_FACTOR_DECIDEGC, &chg->smb_temp); + if (rc < 0) { + smblib_err(chg, "Couldn't read SMB TEMP channel, rc=%d\n", + rc); + return rc; + } + } else if (chg->pl.psy && chg->sec_chg_selected == + POWER_SUPPLY_CHARGER_SEC_PL) { + rc = power_supply_get_property(chg->pl.psy, + POWER_SUPPLY_PROP_CHARGER_TEMP, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't get smb charger temp, rc=%d\n", + rc); + return rc; + } + chg->smb_temp = pval.intval; + } else { + chg->smb_temp = -ENODATA; + } + + return rc; +} + 
+/* SW thermal regulation thresholds in deciDegC */ +#define DIE_TEMP_RST_THRESH 1000 +#define DIE_TEMP_REG_H_THRESH 800 +#define DIE_TEMP_REG_L_THRESH 600 + +#define CONNECTOR_TEMP_SHDN_THRESH 700 +#define CONNECTOR_TEMP_RST_THRESH 600 +#define CONNECTOR_TEMP_REG_H_THRESH 550 +#define CONNECTOR_TEMP_REG_L_THRESH 500 + +#define SMB_TEMP_SHDN_THRESH 1400 +#define SMB_TEMP_RST_THRESH 900 +#define SMB_TEMP_REG_H_THRESH 800 +#define SMB_TEMP_REG_L_THRESH 600 + +#define SKIN_TEMP_SHDN_THRESH 700 +#define SKIN_TEMP_RST_THRESH 600 +#define SKIN_TEMP_REG_H_THRESH 550 +#define SKIN_TEMP_REG_L_THRESH 500 + +#define THERM_REG_RECHECK_DELAY_1S 1000 /* 1 sec */ +#define THERM_REG_RECHECK_DELAY_8S 8000 /* 8 sec */ +static int smblib_process_thermal_readings(struct smb_charger *chg) +{ + int rc = 0, wdog_timeout = SNARL_WDOG_TMOUT_8S; + u32 thermal_status = TEMP_BELOW_RANGE; + bool suspend_input = false, disable_smb = false; + + /* + * Following is the SW thermal regulation flow: + * + * TEMP_SHUT_DOWN_LEVEL: If either connector temp or skin temp + * exceeds their respective SHDN threshold. Need to suspend input + * and secondary charger. + * + * TEMP_SHUT_DOWN_SMB_LEVEL: If smb temp exceeds its SHDN threshold + * but connector and skin temp are below it. Need to suspend SMB. + * + * TEMP_ALERT_LEVEL: If die, connector, smb or skin temp exceeds its + * respective RST threshold. Stay put and monitor temperature closely. + * + * TEMP_ABOVE_RANGE or TEMP_WITHIN_RANGE or TEMP_BELOW_RANGE: If die, + * connector, smb or skin temp exceeds its respective REG_H or REG_L + * threshold. Unsuspend input and SMB. 
+ */ + if (chg->connector_temp > CONNECTOR_TEMP_SHDN_THRESH || + chg->skin_temp > SKIN_TEMP_SHDN_THRESH) { + thermal_status = TEMP_SHUT_DOWN; + wdog_timeout = SNARL_WDOG_TMOUT_1S; + suspend_input = true; + disable_smb = true; + goto out; + } + + if (chg->smb_temp > SMB_TEMP_SHDN_THRESH) { + thermal_status = TEMP_SHUT_DOWN_SMB; + wdog_timeout = SNARL_WDOG_TMOUT_1S; + disable_smb = true; + goto out; + } + + if (chg->connector_temp > CONNECTOR_TEMP_RST_THRESH || + chg->skin_temp > SKIN_TEMP_RST_THRESH || + chg->smb_temp > SMB_TEMP_RST_THRESH || + chg->die_temp > DIE_TEMP_RST_THRESH) { + thermal_status = TEMP_ALERT_LEVEL; + wdog_timeout = SNARL_WDOG_TMOUT_1S; + goto out; + } + + if (chg->connector_temp > CONNECTOR_TEMP_REG_H_THRESH || + chg->skin_temp > SKIN_TEMP_REG_H_THRESH || + chg->smb_temp > SMB_TEMP_REG_H_THRESH || + chg->die_temp > DIE_TEMP_REG_H_THRESH) { + thermal_status = TEMP_ABOVE_RANGE; + wdog_timeout = SNARL_WDOG_TMOUT_1S; + goto out; + } + + if (chg->connector_temp > CONNECTOR_TEMP_REG_L_THRESH || + chg->skin_temp > SKIN_TEMP_REG_L_THRESH || + chg->smb_temp > SMB_TEMP_REG_L_THRESH || + chg->die_temp > DIE_TEMP_REG_L_THRESH) { + thermal_status = TEMP_WITHIN_RANGE; + wdog_timeout = SNARL_WDOG_TMOUT_8S; + } +out: + smblib_dbg(chg, PR_MISC, "Current temperatures: \tDIE_TEMP: %d,\tCONN_TEMP: %d,\tSMB_TEMP: %d,\tSKIN_TEMP: %d\nTHERMAL_STATUS: %d\n", + chg->die_temp, chg->connector_temp, chg->smb_temp, + chg->skin_temp, thermal_status); + + if (thermal_status != chg->thermal_status) { + chg->thermal_status = thermal_status; + /* + * If thermal level changes to TEMP ALERT LEVEL, don't + * enable/disable main/parallel charging. + */ + if (chg->thermal_status == TEMP_ALERT_LEVEL) + goto exit; + + /* Enable/disable SMB_EN pin */ + rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG, + SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT, + (disable_smb ? 
SMB_EN_OVERRIDE_BIT : + (SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT))); + if (rc < 0) + smblib_err(chg, "Couldn't set SMB_EN, rc=%d\n", rc); + + /* + * Enable/disable secondary charger through votables to ensure + * that if SMB_EN pin gets toggled somehow, secondary charger + * remains enabled/disabled according to SW thermal regulation. + */ + if (!chg->cp_disable_votable) + chg->cp_disable_votable = find_votable("CP_DISABLE"); + if (chg->cp_disable_votable) + vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER, + disable_smb, 0); + + vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER, + disable_smb, 0); + smblib_dbg(chg, PR_MISC, "Parallel %s as per SW thermal regulation\n", + disable_smb ? "disabled" : "enabled"); + + /* + * If thermal level changes to TEMP_SHUT_DOWN_SMB, don't + * enable/disable main charger. + */ + if (chg->thermal_status == TEMP_SHUT_DOWN_SMB) + goto exit; + + /* Suspend input if SHDN threshold reached */ + vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER, + suspend_input, 0); + vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, + suspend_input, 0); + smblib_dbg(chg, PR_MISC, "USB/DC %s as per SW thermal regulation\n", + suspend_input ? "suspended" : "unsuspended"); + } +exit: + /* + * On USB suspend, WDOG IRQ stops triggering. To continue thermal + * monitoring and regulation until USB is plugged out, reschedule + * the SW thermal regulation work without releasing the wake lock. 
+ */ + if (is_client_vote_enabled(chg->usb_icl_votable, + SW_THERM_REGULATION_VOTER)) { + schedule_delayed_work(&chg->thermal_regulation_work, + msecs_to_jiffies(THERM_REG_RECHECK_DELAY_1S)); + return 0; + } + + rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG, + SNARL_WDOG_TIMEOUT_MASK, wdog_timeout); + if (rc < 0) + smblib_err(chg, "Couldn't set WD SNARL timer, rc=%d\n", rc); + + vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, false, 0); + return rc; +} + /******************* * DC PSY GETTERS * *******************/ @@ -2685,6 +3063,22 @@ int smblib_get_prop_die_health(struct smb_charger *chg) int rc; u8 stat; + if (chg->wa_flags & SW_THERM_REGULATION_WA) { + if (chg->die_temp == -ENODATA) + return POWER_SUPPLY_HEALTH_UNKNOWN; + + if (chg->die_temp > DIE_TEMP_RST_THRESH) + return POWER_SUPPLY_HEALTH_OVERHEAT; + + if (chg->die_temp > DIE_TEMP_REG_H_THRESH) + return POWER_SUPPLY_HEALTH_HOT; + + if (chg->die_temp > DIE_TEMP_REG_L_THRESH) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; + } + rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &stat); if (rc < 0) { smblib_err(chg, "Couldn't read DIE_TEMP_STATUS_REG, rc=%d\n", @@ -2709,10 +3103,26 @@ int smblib_get_prop_connector_health(struct smb_charger *chg) int rc; u8 stat; + if (chg->wa_flags & SW_THERM_REGULATION_WA) { + if (chg->connector_temp == -ENODATA) + return POWER_SUPPLY_HEALTH_UNKNOWN; + + if (chg->connector_temp > CONNECTOR_TEMP_RST_THRESH) + return POWER_SUPPLY_HEALTH_OVERHEAT; + + if (chg->connector_temp > CONNECTOR_TEMP_REG_H_THRESH) + return POWER_SUPPLY_HEALTH_HOT; + + if (chg->connector_temp > CONNECTOR_TEMP_REG_L_THRESH) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; + } + rc = smblib_read(chg, CONNECTOR_TEMP_STATUS_REG, &stat); if (rc < 0) { smblib_err(chg, "Couldn't read CONNECTOR_TEMP_STATUS_REG, rc=%d\n", - rc); + rc); return POWER_SUPPLY_HEALTH_UNKNOWN; } @@ -3025,6 +3435,7 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, * For PPS, 
Charge Pump is preferred over parallel charger if * present. */ + mutex_lock(&chg->smb_lock); if (chg->pd_active == POWER_SUPPLY_PD_PPS_ACTIVE && chg->sec_cp_present) { rc = smblib_select_sec_charger(chg, @@ -3035,11 +3446,13 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, else chg->cp_reason = POWER_SUPPLY_CP_PPS; } + mutex_unlock(&chg->smb_lock); } else { vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA); vote(chg->usb_icl_votable, PD_VOTER, false, 0); vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0); + mutex_lock(&chg->smb_lock); chg->cp_reason = POWER_SUPPLY_CP_NONE; rc = smblib_select_sec_charger(chg, chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL : @@ -3048,6 +3461,7 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, dev_err(chg->dev, "Couldn't enable secondary charger rc=%d\n", rc); + mutex_unlock(&chg->smb_lock); /* PD hard resets failed, rerun apsd */ if (chg->ok_to_pd) { @@ -3506,6 +3920,12 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) if (rc < 0) smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc); + /* Enable SW Thermal regulation */ + rc = smblib_set_sw_thermal_regulation(chg, true); + if (rc < 0) + smblib_err(chg, "Couldn't start SW thermal regulation WA, rc=%d\n", + rc); + /* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */ if (chg->fcc_stepper_enable) vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0); @@ -3515,6 +3935,12 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) schedule_delayed_work(&chg->pl_enable_work, msecs_to_jiffies(PL_DELAY_MS)); } else { + /* Disable SW Thermal Regulation */ + rc = smblib_set_sw_thermal_regulation(chg, false); + if (rc < 0) + smblib_err(chg, "Couldn't stop SW thermal regulation WA, rc=%d\n", + rc); + if (chg->wa_flags & BOOST_BACK_WA) { data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data; if (data) { @@ -3593,6 +4019,7 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, /* for QC3, switch to CP if present */ if 
((apsd_result->bit & QC_3P0_BIT) && chg->sec_cp_present) { + mutex_lock(&chg->smb_lock); rc = smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_CP); if (rc < 0) @@ -3600,6 +4027,7 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, "Couldn't enable secondary chargers rc=%d\n", rc); else chg->cp_reason = POWER_SUPPLY_CP_HVDCP3; + mutex_unlock(&chg->smb_lock); } smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n", @@ -3932,6 +4360,7 @@ static void typec_src_removal(struct smb_charger *chg) struct smb_irq_data *data; struct storm_watch *wdata; + mutex_lock(&chg->smb_lock); chg->cp_reason = POWER_SUPPLY_CP_NONE; rc = smblib_select_sec_charger(chg, chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL : @@ -3939,6 +4368,7 @@ static void typec_src_removal(struct smb_charger *chg) if (rc < 0) dev_err(chg->dev, "Couldn't disable secondary charger rc=%d\n", rc); + mutex_unlock(&chg->smb_lock); /* disable apsd */ rc = smblib_configure_hvdcp_apsd(chg, false); @@ -3980,6 +4410,14 @@ static void typec_src_removal(struct smb_charger *chg) vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0); vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); + /* Remove SW thermal regulation WA votes */ + vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0); + vote(chg->pl_disable_votable, SW_THERM_REGULATION_VOTER, false, 0); + vote(chg->dc_suspend_votable, SW_THERM_REGULATION_VOTER, false, 0); + if (chg->cp_disable_votable) + vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER, + false, 0); + chg->pulse_cnt = 0; chg->usb_icl_delta_ua = 0; chg->voltage_min_uv = MICRO_5V; @@ -4080,8 +4518,7 @@ irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data) goto out; } - /* liquid presence detected, to check further */ - if ((stat & TYPEC_WATER_DETECTION_STATUS_BIT) + if (!(stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT) && chg->lpd_stage == LPD_STAGE_NONE) { chg->lpd_stage = LPD_STAGE_FLOAT; 
cancel_delayed_work_sync(&chg->lpd_ra_open_work); @@ -4141,7 +4578,7 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data) } if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) { - chg->lpd_stage = LPD_STAGE_ATTACHED; + chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL; cancel_delayed_work_sync(&chg->lpd_ra_open_work); vote(chg->awake_votable, LPD_VOTER, false, 0); @@ -4180,8 +4617,8 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data) chg->early_usb_attach = false; } - chg->lpd_stage = LPD_STAGE_DETACHED; - schedule_delayed_work(&chg->lpd_detach_work, + if (chg->lpd_stage == LPD_STAGE_FLOAT_CANCEL) + schedule_delayed_work(&chg->lpd_detach_work, msecs_to_jiffies(100)); } @@ -4230,14 +4667,17 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data) dev_err(chg->dev, "Couldn't set dc voltage to 2*vph rc=%d\n", rc); + mutex_lock(&chg->smb_lock); chg->cp_reason = POWER_SUPPLY_CP_WIRELESS; rc = smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_CP); if (rc < 0) dev_err(chg->dev, "Couldn't enable secondary chargers rc=%d\n", rc); + mutex_unlock(&chg->smb_lock); } } else if (chg->cp_reason == POWER_SUPPLY_CP_WIRELESS) { + mutex_lock(&chg->smb_lock); chg->cp_reason = POWER_SUPPLY_CP_NONE; rc = smblib_select_sec_charger(chg, chg->sec_pl_present ? 
POWER_SUPPLY_CHARGER_SEC_PL : @@ -4246,6 +4686,7 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data) dev_err(chg->dev, "Couldn't disable secondary charger rc=%d\n", rc); + mutex_unlock(&chg->smb_lock); } power_supply_changed(chg->dc_psy); @@ -4344,6 +4785,22 @@ irqreturn_t switcher_power_ok_irq_handler(int irq, void *data) return IRQ_HANDLED; } +irqreturn_t wdog_snarl_irq_handler(int irq, void *data) +{ + struct smb_irq_data *irq_data = data; + struct smb_charger *chg = irq_data->parent_data; + + smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name); + + if (chg->wa_flags & SW_THERM_REGULATION_WA) { + cancel_delayed_work_sync(&chg->thermal_regulation_work); + vote(chg->awake_votable, SW_THERM_REGULATION_VOTER, true, 0); + schedule_delayed_work(&chg->thermal_regulation_work, 0); + } + + return IRQ_HANDLED; +} + irqreturn_t wdog_bark_irq_handler(int irq, void *data) { struct smb_irq_data *irq_data = data; @@ -4512,7 +4969,9 @@ static void pl_update_work(struct work_struct *work) if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) return; + mutex_lock(&chg->smb_lock); smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_PL); + mutex_unlock(&chg->smb_lock); } static void clear_hdc_work(struct work_struct *work) @@ -4552,6 +5011,23 @@ static void smblib_pl_enable_work(struct work_struct *work) vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); } +static void smblib_thermal_regulation_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + thermal_regulation_work.work); + int rc; + + rc = smblib_update_thermal_readings(chg); + if (rc < 0) + smblib_err(chg, "Couldn't read current thermal values %d\n", + rc); + + rc = smblib_process_thermal_readings(chg); + if (rc < 0) + smblib_err(chg, "Couldn't run sw thermal regulation %d\n", + rc); +} + #define JEITA_SOFT 0 #define JEITA_HARD 1 static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds, @@ -4677,8 +5153,9 @@ static void 
smblib_lpd_ra_open_work(struct work_struct *work) goto out; } - /* double check water detection status bit */ - if (!(stat & TYPEC_WATER_DETECTION_STATUS_BIT)) { + /* quit if moisture status is gone or in attached state */ + if (!(stat & TYPEC_WATER_DETECTION_STATUS_BIT) + || (stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)) { chg->lpd_stage = LPD_STAGE_NONE; goto out; } @@ -4689,8 +5166,8 @@ static void smblib_lpd_ra_open_work(struct work_struct *work) pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE; rc = smblib_set_prop_typec_power_role(chg, &pval); if (rc < 0) { - smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", - pval.intval, rc); + smblib_err(chg, "Couldn't set typec source only mode rc=%d\n", + rc); goto out; } @@ -4704,9 +5181,19 @@ static void smblib_lpd_ra_open_work(struct work_struct *work) goto out; } - /* Emark cable */ - if ((stat & SRC_RA_OPEN_BIT) && - !smblib_rsbux_low(chg, RSBU_K_300K_UV)) { + if (smblib_rsbux_low(chg, RSBU_K_300K_UV)) { + /* Moisture detected, enable sink only mode */ + pval.intval = POWER_SUPPLY_TYPEC_PR_SINK; + rc = smblib_set_prop_typec_power_role(chg, &pval); + if (rc < 0) { + smblib_err(chg, "Couldn't set typec sink only rc=%d\n", + rc); + goto out; + } + + chg->lpd_reason = LPD_MOISTURE_DETECTED; + + } else { /* Floating cable, disable water detection irq temporarily */ rc = smblib_masked_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, TYPEC_WATER_DETECTION_INT_EN_BIT, 0); @@ -4726,17 +5213,6 @@ static void smblib_lpd_ra_open_work(struct work_struct *work) } chg->lpd_reason = LPD_FLOATING_CABLE; - } else { - /* Moisture detected, enable sink only mode */ - pval.intval = POWER_SUPPLY_TYPEC_PR_SINK; - rc = smblib_set_prop_typec_power_role(chg, &pval); - if (rc < 0) { - smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n", - pval.intval, rc); - goto out; - } - - chg->lpd_reason = LPD_MOISTURE_DETECTED; } /* recheck in 60 seconds */ @@ -4750,7 +5226,7 @@ static void 
smblib_lpd_detach_work(struct work_struct *work) struct smb_charger *chg = container_of(work, struct smb_charger, lpd_detach_work.work); - if (chg->lpd_stage == LPD_STAGE_DETACHED) + if (chg->lpd_stage == LPD_STAGE_FLOAT_CANCEL) chg->lpd_stage = LPD_STAGE_NONE; } @@ -4834,6 +5310,16 @@ static int smblib_create_votables(struct smb_charger *chg) return rc; } + chg->wdog_snarl_irq_en_votable = create_votable("SNARL_WDOG_IRQ_ENABLE", + VOTE_SET_ANY, + smblib_wdog_snarl_irq_en_vote_callback, + chg); + if (IS_ERR(chg->wdog_snarl_irq_en_votable)) { + rc = PTR_ERR(chg->wdog_snarl_irq_en_votable); + chg->wdog_snarl_irq_en_votable = NULL; + return rc; + } + return rc; } @@ -4868,7 +5354,7 @@ int smblib_init(struct smb_charger *chg) union power_supply_propval prop_val; int rc = 0; - mutex_init(&chg->lock); + mutex_init(&chg->smb_lock); INIT_WORK(&chg->bms_update_work, bms_update_work); INIT_WORK(&chg->pl_update_work, pl_update_work); INIT_WORK(&chg->jeita_update_work, jeita_update_work); @@ -4879,6 +5365,8 @@ int smblib_init(struct smb_charger *chg) INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work); INIT_DELAYED_WORK(&chg->lpd_ra_open_work, smblib_lpd_ra_open_work); INIT_DELAYED_WORK(&chg->lpd_detach_work, smblib_lpd_detach_work); + INIT_DELAYED_WORK(&chg->thermal_regulation_work, + smblib_thermal_regulation_work); chg->fake_capacity = -EINVAL; chg->fake_input_current_limited = -EINVAL; chg->fake_batt_status = -EINVAL; @@ -4916,16 +5404,16 @@ int smblib_init(struct smb_charger *chg) if (chg->sec_pl_present) { chg->pl.psy = power_supply_get_by_name("parallel"); if (chg->pl.psy) { + mutex_lock(&chg->smb_lock); if (chg->sec_chg_selected != POWER_SUPPLY_CHARGER_SEC_CP) { rc = smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_PL); - if (rc < 0) { + if (rc < 0) smblib_err(chg, "Couldn't config pl charger rc=%d\n", rc); - return rc; - } } + mutex_unlock(&chg->smb_lock); if (chg->smb_temp_max == -EINVAL) { rc = smblib_get_thermal_threshold(chg, @@ -4981,6 +5469,7 
@@ int smblib_deinit(struct smb_charger *chg) cancel_delayed_work_sync(&chg->bb_removal_work); cancel_delayed_work_sync(&chg->lpd_ra_open_work); cancel_delayed_work_sync(&chg->lpd_detach_work); + cancel_delayed_work_sync(&chg->thermal_regulation_work); power_supply_unreg_notifier(&chg->nb); smblib_destroy_votables(chg); qcom_step_chg_deinit(); diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 380a3a7d5c9ddb9a36c97334bb13d79a9468a0d6..6962f9dbd8c36538cb185c35539f85083a7ed9b5 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -64,6 +64,7 @@ enum print_reason { #define FORCE_RECHARGE_VOTER "FORCE_RECHARGE_VOTER" #define LPD_VOTER "LPD_VOTER" #define FCC_STEPPER_VOTER "FCC_STEPPER_VOTER" +#define SW_THERM_REGULATION_VOTER "SW_THERM_REGULATION_VOTER" #define BOOST_BACK_STORM_COUNT 3 #define WEAK_CHG_STORM_COUNT 8 @@ -92,6 +93,7 @@ enum qc2_non_comp_voltage { enum { BOOST_BACK_WA = BIT(0), + SW_THERM_REGULATION_WA = BIT(1), }; enum smb_irq_index { @@ -204,14 +206,27 @@ enum lpd_reason { LPD_FLOATING_CABLE, }; +/* Following states are applicable only for floating cable during LPD */ enum lpd_stage { + /* initial stage */ LPD_STAGE_NONE, + /* started and ongoing */ LPD_STAGE_FLOAT, - LPD_STAGE_ATTACHED, - LPD_STAGE_DETACHED, + /* cancel if started, or don't start */ + LPD_STAGE_FLOAT_CANCEL, + /* confirmed and mitigation measures taken for 60 s */ LPD_STAGE_COMMIT, }; +enum thermal_status_levels { + TEMP_SHUT_DOWN = 0, + TEMP_SHUT_DOWN_SMB, + TEMP_ALERT_LEVEL, + TEMP_ABOVE_RANGE, + TEMP_WITHIN_RANGE, + TEMP_BELOW_RANGE, +}; + /* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */ static const u32 smblib_extcon_exclusive[] = {0x3, 0}; @@ -279,6 +294,9 @@ struct smb_iio { struct iio_channel *connector_temp_chan; struct iio_channel *sbux_chan; struct iio_channel *vph_v_chan; + struct iio_channel *die_temp_chan; + struct iio_channel *skin_temp_chan; + struct iio_channel 
*smb_temp_chan; }; struct smb_charger { @@ -298,7 +316,7 @@ struct smb_charger { bool pd_not_supported; /* locks */ - struct mutex lock; + struct mutex smb_lock; struct mutex ps_change_lock; /* power supplies */ @@ -332,6 +350,8 @@ struct smb_charger { struct votable *chg_disable_votable; struct votable *pl_enable_votable_indirect; struct votable *usb_irq_enable_votable; + struct votable *cp_disable_votable; + struct votable *wdog_snarl_irq_en_votable; /* work */ struct work_struct bms_update_work; @@ -345,6 +365,7 @@ struct smb_charger { struct delayed_work bb_removal_work; struct delayed_work lpd_ra_open_work; struct delayed_work lpd_detach_work; + struct delayed_work thermal_regulation_work; struct alarm lpd_recheck_timer; @@ -401,6 +422,12 @@ struct smb_charger { enum lpd_stage lpd_stage; enum lpd_reason lpd_reason; bool fcc_stepper_enable; + int die_temp; + int smb_temp; + int skin_temp; + int connector_temp; + int thermal_status; + int main_fcc_max; /* workaround flag */ u32 wa_flags; @@ -479,6 +506,7 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data); irqreturn_t dc_plugin_irq_handler(int irq, void *data); irqreturn_t high_duty_cycle_irq_handler(int irq, void *data); irqreturn_t switcher_power_ok_irq_handler(int irq, void *data); +irqreturn_t wdog_snarl_irq_handler(int irq, void *data); irqreturn_t wdog_bark_irq_handler(int irq, void *data); irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data); @@ -599,10 +627,15 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg, int smblib_get_prop_from_bms(struct smb_charger *chg, enum power_supply_property psp, union power_supply_propval *val); +int smblib_get_iio_channel(struct smb_charger *chg, const char *propname, + struct iio_channel **chan); +int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan, + int div, int *data); int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable); int smblib_icl_override(struct smb_charger *chg, 
bool override); enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm, ktime_t time); +int smblib_toggle_smb_en(struct smb_charger *chg, int toggle); int smblib_init(struct smb_charger *chg); int smblib_deinit(struct smb_charger *chg); diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h index fd0385c507700c2016197884461501837d74d307..aee35076044c146d430b77cf69f979eecf5ee361 100644 --- a/drivers/power/supply/qcom/smb5-reg.h +++ b/drivers/power/supply/qcom/smb5-reg.h @@ -328,6 +328,7 @@ enum { #define TYPEC_WATER_DETECTION_STATUS_BIT BIT(7) #define SNK_SRC_MODE_BIT BIT(6) #define TYPEC_VBUS_ERROR_STATUS_BIT BIT(4) +#define TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT BIT(3) #define CC_ORIENTATION_BIT BIT(1) #define CC_ATTACHED_BIT BIT(0) @@ -438,11 +439,6 @@ enum { #define AICL_CMD_REG (MISC_BASE + 0x44) #define RERUN_AICL_BIT BIT(0) -#define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x43) -#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT BIT(7) -#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2) -#define BITE_WDOG_TIMEOUT_MASK GENMASK(1, 0) - #define MISC_SMB_EN_CMD_REG (MISC_BASE + 0x48) #define SMB_EN_OVERRIDE_VALUE_BIT BIT(4) #define SMB_EN_OVERRIDE_BIT BIT(3) @@ -455,11 +451,24 @@ enum { #define BARK_WDOG_INT_EN_BIT BIT(6) #define WDOG_TIMER_EN_ON_PLUGIN_BIT BIT(1) +#define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x53) +#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT BIT(7) +#define SNARL_WDOG_TIMEOUT_MASK GENMASK(6, 4) +#define SNARL_WDOG_TMOUT_62P5MS 0x00 +#define SNARL_WDOG_TMOUT_1S 0x40 +#define SNARL_WDOG_TMOUT_8S 0x70 +#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2) +#define BARK_WDOG_TIMEOUT_SHIFT 2 +#define BITE_WDOG_TIMEOUT_MASK GENMASK(1, 0) +#define BITE_WDOG_TIMEOUT_8S 0x3 +#define MIN_WD_BARK_TIME 16 + #define AICL_RERUN_TIME_CFG_REG (MISC_BASE + 0x61) #define AICL_RERUN_TIME_12S_VAL 0x01 #define MISC_THERMREG_SRC_CFG_REG (MISC_BASE + 0x70) #define THERMREG_SMB_ADC_SRC_EN_BIT BIT(5) +#define THERMREG_DIE_CMP_SRC_EN_BIT BIT(0) 
#define MISC_SMB_CFG_REG (MISC_BASE + 0x90) #define SMB_EN_SEL_BIT BIT(4) diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c index c0c679eaff8f02de9281b631b501a98a88388d6d..a23f495a58523752b8aff239b15eb560708a15e3 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.c +++ b/drivers/power/supply/qcom/step-chg-jeita.c @@ -30,24 +30,18 @@ && (value) <= (right))) struct step_chg_cfg { - u32 psy_prop; - char *prop_name; - int hysteresis; - struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; + struct step_chg_jeita_param param; + struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; }; struct jeita_fcc_cfg { - u32 psy_prop; - char *prop_name; - int hysteresis; - struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; + struct step_chg_jeita_param param; + struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; }; struct jeita_fv_cfg { - u32 psy_prop; - char *prop_name; - int hysteresis; - struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES]; + struct step_chg_jeita_param param; + struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES]; }; struct step_chg_info { @@ -60,6 +54,7 @@ struct step_chg_info { bool step_chg_cfg_valid; bool sw_jeita_cfg_valid; bool soc_based_step_chg; + bool ocv_based_step_chg; bool batt_missing; int jeita_fcc_index; int jeita_fv_index; @@ -77,6 +72,7 @@ struct step_chg_info { struct power_supply *batt_psy; struct power_supply *bms_psy; struct power_supply *usb_psy; + struct power_supply *main_psy; struct delayed_work status_change_work; struct delayed_work get_config_work; struct notifier_block nb; @@ -266,9 +262,20 @@ static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip) chip->soc_based_step_chg = of_property_read_bool(profile_node, "qcom,soc-based-step-chg"); if (chip->soc_based_step_chg) { - chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_CAPACITY, - chip->step_chg_config->prop_name = "SOC"; - chip->step_chg_config->hysteresis = 0; + chip->step_chg_config->param.psy_prop = + 
POWER_SUPPLY_PROP_CAPACITY; + chip->step_chg_config->param.prop_name = "SOC"; + chip->step_chg_config->param.hysteresis = 0; + } + + chip->ocv_based_step_chg = + of_property_read_bool(profile_node, "qcom,ocv-based-step-chg"); + if (chip->ocv_based_step_chg) { + chip->step_chg_config->param.psy_prop = + POWER_SUPPLY_PROP_VOLTAGE_OCV; + chip->step_chg_config->param.prop_name = "OCV"; + chip->step_chg_config->param.hysteresis = 10000; + chip->step_chg_config->param.use_bms = true; } chip->step_chg_cfg_valid = true; @@ -457,16 +464,21 @@ static int handle_step_chg_config(struct step_chg_info *chip) goto update_time; } - rc = power_supply_get_property(chip->batt_psy, - chip->step_chg_config->psy_prop, &pval); + if (chip->step_chg_config->param.use_bms) + rc = power_supply_get_property(chip->bms_psy, + chip->step_chg_config->param.psy_prop, &pval); + else + rc = power_supply_get_property(chip->batt_psy, + chip->step_chg_config->param.psy_prop, &pval); + if (rc < 0) { pr_err("Couldn't read %s property rc=%d\n", - chip->step_chg_config->prop_name, rc); + chip->step_chg_config->param.prop_name, rc); return rc; } rc = get_val(chip->step_chg_config->fcc_cfg, - chip->step_chg_config->hysteresis, + chip->step_chg_config->param.hysteresis, chip->step_index, pval.intval, &chip->step_index, @@ -486,7 +498,7 @@ static int handle_step_chg_config(struct step_chg_info *chip) vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua); pr_debug("%s = %d Step-FCC = %duA\n", - chip->step_chg_config->prop_name, pval.intval, fcc_ua); + chip->step_chg_config->param.prop_name, pval.intval, fcc_ua); update_time: chip->step_last_update_time = ktime_get(); @@ -525,16 +537,21 @@ static int handle_jeita(struct step_chg_info *chip) if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US) goto reschedule; - rc = power_supply_get_property(chip->batt_psy, - chip->jeita_fcc_config->psy_prop, &pval); + if (chip->jeita_fcc_config->param.use_bms) + rc = power_supply_get_property(chip->bms_psy, + 
chip->jeita_fcc_config->param.psy_prop, &pval); + else + rc = power_supply_get_property(chip->batt_psy, + chip->jeita_fcc_config->param.psy_prop, &pval); + if (rc < 0) { pr_err("Couldn't read %s property rc=%d\n", - chip->jeita_fcc_config->prop_name, rc); + chip->jeita_fcc_config->param.prop_name, rc); return rc; } rc = get_val(chip->jeita_fcc_config->fcc_cfg, - chip->jeita_fcc_config->hysteresis, + chip->jeita_fcc_config->param.hysteresis, chip->jeita_fcc_index, pval.intval, &chip->jeita_fcc_index, @@ -551,7 +568,7 @@ static int handle_jeita(struct step_chg_info *chip) vote(chip->fcc_votable, JEITA_VOTER, fcc_ua ? true : false, fcc_ua); rc = get_val(chip->jeita_fv_config->fv_cfg, - chip->jeita_fv_config->hysteresis, + chip->jeita_fv_config->param.hysteresis, chip->jeita_fv_index, pval.intval, &chip->jeita_fv_index, @@ -598,6 +615,12 @@ static int handle_jeita(struct step_chg_info *chip) update_time: chip->jeita_last_update_time = ktime_get(); + + if (!chip->main_psy) + chip->main_psy = power_supply_get_by_name("main"); + if (chip->main_psy) + power_supply_changed(chip->main_psy); + return 0; reschedule: @@ -648,7 +671,7 @@ static void status_change_work(struct work_struct *work) int reschedule_step_work_us = 0; union power_supply_propval prop = {0, }; - if (!is_batt_available(chip)) + if (!is_batt_available(chip) || !is_bms_available(chip)) goto exit_work; handle_battery_insertion(chip); @@ -760,9 +783,9 @@ int qcom_step_chg_init(struct device *dev, if (!chip->step_chg_config) return -ENOMEM; - chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW; - chip->step_chg_config->prop_name = "VBATT"; - chip->step_chg_config->hysteresis = 100000; + chip->step_chg_config->param.psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW; + chip->step_chg_config->param.prop_name = "VBATT"; + chip->step_chg_config->param.hysteresis = 100000; chip->jeita_fcc_config = devm_kzalloc(dev, sizeof(struct jeita_fcc_cfg), GFP_KERNEL); @@ -771,12 +794,12 @@ int qcom_step_chg_init(struct device 
*dev, if (!chip->jeita_fcc_config || !chip->jeita_fv_config) return -ENOMEM; - chip->jeita_fcc_config->psy_prop = POWER_SUPPLY_PROP_TEMP; - chip->jeita_fcc_config->prop_name = "BATT_TEMP"; - chip->jeita_fcc_config->hysteresis = 10; - chip->jeita_fv_config->psy_prop = POWER_SUPPLY_PROP_TEMP; - chip->jeita_fv_config->prop_name = "BATT_TEMP"; - chip->jeita_fv_config->hysteresis = 10; + chip->jeita_fcc_config->param.psy_prop = POWER_SUPPLY_PROP_TEMP; + chip->jeita_fcc_config->param.prop_name = "BATT_TEMP"; + chip->jeita_fcc_config->param.hysteresis = 10; + chip->jeita_fv_config->param.psy_prop = POWER_SUPPLY_PROP_TEMP; + chip->jeita_fv_config->param.prop_name = "BATT_TEMP"; + chip->jeita_fv_config->param.hysteresis = 10; INIT_DELAYED_WORK(&chip->status_change_work, status_change_work); INIT_DELAYED_WORK(&chip->get_config_work, get_config_work); diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h index 6760d66143f16ae5871117286eb5fc91cfec4e15..df882da9ba314066b934143128978f3715d1a4d0 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.h +++ b/drivers/power/supply/qcom/step-chg-jeita.h @@ -15,6 +15,13 @@ #define MAX_STEP_CHG_ENTRIES 8 +struct step_chg_jeita_param { + u32 psy_prop; + char *prop_name; + int hysteresis; + bool use_bms; +}; + struct range_data { u32 low_threshold; u32 high_threshold; diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 89ff9991cc1ea62c8e55f183b3cbc2e0b1e373db..9e529d0e5f3faa7ca4ef6492338d88b567bb60ae 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -535,9 +535,19 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) if (state->period != pwm->state.period || state->duty_cycle != pwm->state.duty_cycle) { - err = pwm->chip->ops->config(pwm->chip, pwm, - state->duty_cycle, - state->period); + if (pwm->chip->ops->config_extend) { + err = pwm->chip->ops->config_extend(pwm->chip, + pwm, state->duty_cycle, + state->period); + } else { + if (state->period > 
UINT_MAX) + pr_warn("period %llu duty_cycle %llu will be truncated\n", + state->period, + state->duty_cycle); + err = pwm->chip->ops->config(pwm->chip, pwm, + state->duty_cycle, + state->period); + } if (err) return err; @@ -1021,8 +1031,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) if (state.enabled) seq_puts(s, " enabled"); - seq_printf(s, " period: %u ns", state.period); - seq_printf(s, " duty: %u ns", state.duty_cycle); + seq_printf(s, " period: %llu ns", state.period); + seq_printf(s, " duty: %llu ns", state.duty_cycle); seq_printf(s, " polarity: %s", state.polarity ? "inverse" : "normal"); diff --git a/drivers/pwm/pwm-qti-lpg.c b/drivers/pwm/pwm-qti-lpg.c index 784fc8bb3ae226d5d648dcb1ee5a3ec0afd4a268..ee2f98c776bfaf86d412bc2b73b485cdcf9115a5 100644 --- a/drivers/pwm/pwm-qti-lpg.c +++ b/drivers/pwm/pwm-qti-lpg.c @@ -134,7 +134,7 @@ struct lpg_pwm_config { u32 prediv; u32 clk_exp; u16 pwm_value; - u32 best_period_ns; + u64 best_period_ns; }; struct qpnp_lpg_lut { @@ -154,8 +154,8 @@ struct qpnp_lpg_channel { u8 src_sel; u8 subtype; bool lut_written; - int current_period_ns; - int current_duty_ns; + u64 current_period_ns; + u64 current_duty_ns; }; struct qpnp_lpg_chip { @@ -506,15 +506,15 @@ static int qpnp_lpg_set_ramp_config(struct qpnp_lpg_channel *lpg) return rc; } -static void __qpnp_lpg_calc_pwm_period(int period_ns, +static void __qpnp_lpg_calc_pwm_period(u64 period_ns, struct lpg_pwm_config *pwm_config) { struct lpg_pwm_config configs[NUM_PWM_SIZE]; int i, j, m, n; - int tmp1, tmp2; - int clk_period_ns = 0, pwm_clk_period_ns; - int clk_delta_ns = INT_MAX, min_clk_delta_ns = INT_MAX; - int pwm_period_delta = INT_MAX, min_pwm_period_delta = INT_MAX; + u64 tmp1, tmp2; + u64 clk_period_ns = 0, pwm_clk_period_ns; + u64 clk_delta_ns = U64_MAX, min_clk_delta_ns = U64_MAX; + u64 pwm_period_delta = U64_MAX, min_pwm_period_delta = U64_MAX; int pwm_size_step; /* @@ -531,7 +531,8 @@ static void __qpnp_lpg_calc_pwm_period(int period_ns, 
for (m = 0; m < ARRAY_SIZE(pwm_exponent); m++) { tmp1 = 1 << pwm_exponent[m]; tmp1 *= clk_prediv[j]; - tmp2 = NSEC_PER_SEC / clk_freq_hz[i]; + tmp2 = NSEC_PER_SEC; + do_div(tmp2, clk_freq_hz[i]); clk_period_ns = tmp1 * tmp2; @@ -561,10 +562,7 @@ static void __qpnp_lpg_calc_pwm_period(int period_ns, configs[n].best_period_ns *= 1 << pwm_size[n]; /* Find the closest setting for PWM period */ - if (min_clk_delta_ns < INT_MAX >> pwm_size[n]) - pwm_period_delta = min_clk_delta_ns << pwm_size[n]; - else - pwm_period_delta = INT_MAX; + pwm_period_delta = min_clk_delta_ns << pwm_size[n]; if (pwm_period_delta < min_pwm_period_delta) { min_pwm_period_delta = pwm_period_delta; memcpy(pwm_config, &configs[n], @@ -582,21 +580,20 @@ static void __qpnp_lpg_calc_pwm_period(int period_ns, pwm_config->clk_exp -= pwm_size_step; } } - pr_debug("PWM setting for period_ns %d: pwm_clk = %dHZ, prediv = %d, exponent = %d, pwm_size = %d\n", + pr_debug("PWM setting for period_ns %llu: pwm_clk = %dHZ, prediv = %d, exponent = %d, pwm_size = %d\n", period_ns, pwm_config->pwm_clk, pwm_config->prediv, pwm_config->clk_exp, pwm_config->pwm_size); - pr_debug("Actual period: %dns\n", pwm_config->best_period_ns); + pr_debug("Actual period: %lluns\n", pwm_config->best_period_ns); } -static void __qpnp_lpg_calc_pwm_duty(int period_ns, int duty_ns, +static void __qpnp_lpg_calc_pwm_duty(u64 period_ns, u64 duty_ns, struct lpg_pwm_config *pwm_config) { u16 pwm_value, max_pwm_value; + u64 tmp; - if ((1 << pwm_config->pwm_size) > (INT_MAX / duty_ns)) - pwm_value = duty_ns / (period_ns >> pwm_config->pwm_size); - else - pwm_value = (duty_ns << pwm_config->pwm_size) / period_ns; + tmp = (u64)duty_ns << pwm_config->pwm_size; + pwm_value = (u16)div64_u64(tmp, period_ns); max_pwm_value = (1 << pwm_config->pwm_size) - 1; if (pwm_value > max_pwm_value) @@ -604,20 +601,13 @@ static void __qpnp_lpg_calc_pwm_duty(int period_ns, int duty_ns, pwm_config->pwm_value = pwm_value; } -static int qpnp_lpg_pwm_config(struct 
pwm_chip *pwm_chip, - struct pwm_device *pwm, int duty_ns, int period_ns) +static int qpnp_lpg_config(struct qpnp_lpg_channel *lpg, + u64 duty_ns, u64 period_ns) { - struct qpnp_lpg_channel *lpg; - int rc = 0; - - lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm); - if (lpg == NULL) { - dev_err(pwm_chip->dev, "lpg not found\n"); - return -ENODEV; - } + int rc; if (duty_ns > period_ns) { - dev_err(pwm_chip->dev, "Duty %dns is larger than period %dns\n", + dev_err(lpg->chip->dev, "Duty %lluns is larger than period %lluns\n", duty_ns, period_ns); return -EINVAL; } @@ -631,7 +621,7 @@ static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip, lpg->ramp_config.pattern, lpg->ramp_config.pattern_length); if (rc < 0) { - dev_err(pwm_chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n", + dev_err(lpg->chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n", lpg->lpg_idx, rc); return rc; } @@ -645,7 +635,7 @@ static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip, rc = qpnp_lpg_set_pwm_config(lpg); if (rc < 0) { - dev_err(pwm_chip->dev, "Config PWM failed for channel %d, rc=%d\n", + dev_err(lpg->chip->dev, "Config PWM failed for channel %d, rc=%d\n", lpg->lpg_idx, rc); return rc; } @@ -656,6 +646,34 @@ static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip, return rc; } +static int qpnp_lpg_pwm_config(struct pwm_chip *pwm_chip, + struct pwm_device *pwm, int duty_ns, int period_ns) +{ + struct qpnp_lpg_channel *lpg; + + lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm); + if (lpg == NULL) { + dev_err(pwm_chip->dev, "lpg not found\n"); + return -ENODEV; + } + + return qpnp_lpg_config(lpg, (u64)duty_ns, (u64)period_ns); +} + +static int qpnp_lpg_pwm_config_extend(struct pwm_chip *pwm_chip, + struct pwm_device *pwm, u64 duty_ns, u64 period_ns) +{ + struct qpnp_lpg_channel *lpg; + + lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm); + if (lpg == NULL) { + dev_err(pwm_chip->dev, "lpg not found\n"); + return -ENODEV; + } + + return qpnp_lpg_config(lpg, duty_ns, period_ns); +} + static int 
qpnp_lpg_pwm_src_enable(struct qpnp_lpg_channel *lpg, bool en) { struct qpnp_lpg_chip *chip = lpg->chip; @@ -747,7 +765,7 @@ static int qpnp_lpg_pwm_set_output_type(struct pwm_chip *pwm_chip, lpg->ramp_config.pattern, lpg->ramp_config.pattern_length); if (rc < 0) { - dev_err(pwm_chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n", + dev_err(lpg->chip->dev, "set LUT pattern failed for LPG%d, rc=%d\n", lpg->lpg_idx, rc); return rc; } @@ -780,8 +798,9 @@ static int qpnp_lpg_pwm_set_output_pattern(struct pwm_chip *pwm_chip, struct pwm_device *pwm, struct pwm_output_pattern *output_pattern) { struct qpnp_lpg_channel *lpg; - int rc = 0, i, period_ns, duty_ns; + u64 period_ns, duty_ns, tmp; u32 *percentages; + int rc = 0, i; lpg = pwm_dev_to_qpnp_lpg(pwm_chip, pwm); if (lpg == NULL) { @@ -801,19 +820,17 @@ static int qpnp_lpg_pwm_set_output_pattern(struct pwm_chip *pwm_chip, if (!percentages) return -ENOMEM; - period_ns = pwm_get_period(pwm); + period_ns = pwm_get_period_extend(pwm); for (i = 0; i < output_pattern->num_entries; i++) { duty_ns = output_pattern->duty_pattern[i]; if (duty_ns > period_ns) { - dev_err(lpg->chip->dev, "duty %dns is larger than period %dns\n", + dev_err(lpg->chip->dev, "duty %lluns is larger than period %lluns\n", duty_ns, period_ns); goto err; } /* Translate the pattern in duty_ns to percentage */ - if ((INT_MAX / duty_ns) < 100) - percentages[i] = duty_ns / (period_ns / 100); - else - percentages[i] = (duty_ns * 100) / period_ns; + tmp = (u64)duty_ns * 100; + percentages[i] = (u32)div64_u64(tmp, period_ns); } rc = qpnp_lpg_set_lut_pattern(lpg, percentages, @@ -829,12 +846,10 @@ static int qpnp_lpg_pwm_set_output_pattern(struct pwm_chip *pwm_chip, output_pattern->num_entries); lpg->ramp_config.hi_idx = lpg->ramp_config.lo_idx + output_pattern->num_entries - 1; - if ((INT_MAX / period_ns) > output_pattern->cycles_per_duty) - lpg->ramp_config.step_ms = output_pattern->cycles_per_duty * - period_ns / NSEC_PER_MSEC; - else - 
lpg->ramp_config.step_ms = (period_ns / NSEC_PER_MSEC) * - output_pattern->cycles_per_duty; + + tmp = (u64)output_pattern->cycles_per_duty * period_ns; + do_div(tmp, NSEC_PER_MSEC); + lpg->ramp_config.step_ms = (u16)tmp; rc = qpnp_lpg_set_ramp_config(lpg); if (rc < 0) @@ -956,8 +971,8 @@ static void qpnp_lpg_pwm_dbg_show(struct pwm_chip *pwm_chip, struct seq_file *s) seq_printf(s, " prediv = %d\n", cfg->prediv); seq_printf(s, " exponent = %d\n", cfg->clk_exp); seq_printf(s, " pwm_value = %d\n", cfg->pwm_value); - seq_printf(s, " Requested period: %dns, best period = %dns\n", - pwm_get_period(pwm), cfg->best_period_ns); + seq_printf(s, " Requested period: %lluns, best period = %lluns\n", + pwm_get_period_extend(pwm), cfg->best_period_ns); ramp = &lpg->ramp_config; if (pwm_get_output_type(pwm) == PWM_OUTPUT_MODULATED) { @@ -988,6 +1003,7 @@ static void qpnp_lpg_pwm_dbg_show(struct pwm_chip *pwm_chip, struct seq_file *s) static const struct pwm_ops qpnp_lpg_pwm_ops = { .config = qpnp_lpg_pwm_config, + .config_extend = qpnp_lpg_pwm_config_extend, .get_output_type_supported = qpnp_lpg_pwm_output_types_supported, .set_output_type = qpnp_lpg_pwm_set_output_type, .set_output_pattern = qpnp_lpg_pwm_set_output_pattern, diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index ea2b53d171991c100104f05f56f7fc74a4563f1e..50b7a3b278c13e960c22e8609c0992733053962d 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -50,7 +50,7 @@ static ssize_t period_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.period); + return sprintf(buf, "%llu\n", state.period); } static ssize_t period_store(struct device *child, @@ -85,7 +85,7 @@ static ssize_t duty_cycle_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.duty_cycle); + return sprintf(buf, "%llu\n", state.duty_cycle); } static ssize_t duty_cycle_store(struct device *child, @@ -220,7 +220,7 @@ static ssize_t capture_show(struct device *child, if 
(ret) return ret; - return sprintf(buf, "%u %u\n", result.period, result.duty_cycle); + return sprintf(buf, "%llu %llu\n", result.period, result.duty_cycle); } static ssize_t output_type_show(struct device *child, diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index bb768cc4428526d787c610406cca2d399102c550..8c624c8e489ccb6d497aaba5fd558d3564e66d8f 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -12,6 +12,7 @@ * published by the Free Software Foundation. * */ +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -40,7 +42,10 @@ /* VSEL bit definitions */ #define VSEL_BUCK_EN (1 << 7) #define VSEL_MODE (1 << 6) -#define VSEL_NSEL_MASK 0x3F +#define HL7503_VSEL0_MODE BIT(0) +#define HL7503_VSEL1_MODE BIT(1) +#define VSEL_NSEL_MASK 0x3F +#define HL7503_VSEL_NSEL_MASK 0x7F /* Chip ID and Verison */ #define DIE_ID 0x0F /* ID1 */ #define DIE_REV 0x0F /* ID2 */ @@ -51,11 +56,13 @@ #define CTL_RESET (1 << 2) #define FAN53555_NVOLTAGES 64 /* Numbers of voltages */ +#define HL7503_NVOLTAGES 128 enum fan53555_vendor { FAN53555_VENDOR_FAIRCHILD = 0, FAN53555_VENDOR_SILERGY, HALO_HL7509, + HALO_HL7503, }; /* IC Type */ @@ -99,13 +106,16 @@ struct fan53555_device_info { unsigned int slew_rate; /* Sleep voltage cache */ unsigned int sleep_vol_cache; + unsigned int peek_poke_address; /* Disable suspend */ bool disable_suspend; + struct dentry *debug_root; }; static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct fan53555_device_info *di = rdev_get_drvdata(rdev); + u8 vsel_mask; int ret; if (di->disable_suspend) @@ -115,8 +125,10 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) ret = regulator_map_voltage_linear(rdev, uV, uV); if (ret < 0) return ret; - ret = regmap_update_bits(di->regmap, di->sleep_reg, - VSEL_NSEL_MASK, ret); + + vsel_mask = (di->vendor == HALO_HL7503) ? 
HL7503_VSEL_NSEL_MASK + : VSEL_NSEL_MASK; + ret = regmap_update_bits(di->regmap, di->sleep_reg, vsel_mask, ret); if (ret < 0) return ret; /* Cache the sleep voltage setting. @@ -130,6 +142,9 @@ static int fan53555_set_suspend_enable(struct regulator_dev *rdev) { struct fan53555_device_info *di = rdev_get_drvdata(rdev); + if (di->disable_suspend) + return 0; + return regmap_update_bits(di->regmap, di->sleep_reg, VSEL_BUCK_EN, VSEL_BUCK_EN); } @@ -138,6 +153,9 @@ static int fan53555_set_suspend_disable(struct regulator_dev *rdev) { struct fan53555_device_info *di = rdev_get_drvdata(rdev); + if (di->disable_suspend) + return 0; + return regmap_update_bits(di->regmap, di->sleep_reg, VSEL_BUCK_EN, 0); } @@ -145,14 +163,23 @@ static int fan53555_set_suspend_disable(struct regulator_dev *rdev) static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct fan53555_device_info *di = rdev_get_drvdata(rdev); + unsigned int reg, mask; + + reg = di->vol_reg; + mask = VSEL_MODE; + if (di->vendor == HALO_HL7503) { + /* uses control register for mode control */ + reg = FAN53555_CONTROL; + mask = (di->vol_reg == FAN53555_VSEL0) ?
HL7503_VSEL0_MODE + : HL7503_VSEL1_MODE; + } switch (mode) { case REGULATOR_MODE_FAST: - regmap_update_bits(di->regmap, di->vol_reg, - VSEL_MODE, VSEL_MODE); + regmap_update_bits(di->regmap, reg, mask, mask); break; case REGULATOR_MODE_NORMAL: - regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0); + regmap_update_bits(di->regmap, reg, mask, 0); break; default: return -EINVAL; @@ -163,13 +190,22 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode) static unsigned int fan53555_get_mode(struct regulator_dev *rdev) { struct fan53555_device_info *di = rdev_get_drvdata(rdev); - unsigned int val; + unsigned int val, reg, mask; int ret = 0; - ret = regmap_read(di->regmap, di->vol_reg, &val); + reg = di->vol_reg; + mask = VSEL_MODE; + if (di->vendor == HALO_HL7503) { + /* uses control register for mode control */ + reg = FAN53555_CONTROL; + mask = (di->vol_reg == FAN53555_VSEL0) ? HL7503_VSEL0_MODE + : HL7503_VSEL1_MODE; + } + + ret = regmap_read(di->regmap, reg, &val); if (ret < 0) return ret; - if (val & VSEL_MODE) + if (val & mask) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; @@ -282,6 +318,23 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) return 0; } +static int hl7503_voltages_setup_halo(struct fan53555_device_info *di) +{ + /* Init voltage range and step */ + switch (di->chip_id) { + case FAN53555_CHIP_ID_08: + di->vsel_min = 600000; + di->vsel_step = 6250; + break; + default: + dev_err(di->dev, + "Chip ID %d not supported!\n", di->chip_id); + return -EINVAL; + } + + return 0; +} + /* For 00,01,03,05 options: * VOUT = 0.60V + NSELx * 10mV, from 0.60 to 1.23V.
* For 04 option: @@ -317,6 +370,9 @@ static int fan53555_device_setup(struct fan53555_device_info *di, case HALO_HL7509: ret = fan53555_voltages_setup_fairchild(di); break; + case HALO_HL7503: + ret = hl7503_voltages_setup_halo(di); + break; default: dev_err(di->dev, "vendor %d not supported!\n", di->vendor); return -EINVAL; @@ -334,15 +390,21 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, rdesc->supply_name = "vin"; rdesc->ops = &fan53555_regulator_ops; rdesc->type = REGULATOR_VOLTAGE; - rdesc->n_voltages = FAN53555_NVOLTAGES; rdesc->enable_reg = di->vol_reg; rdesc->enable_mask = VSEL_BUCK_EN; rdesc->min_uV = di->vsel_min; rdesc->uV_step = di->vsel_step; rdesc->vsel_reg = di->vol_reg; - rdesc->vsel_mask = VSEL_NSEL_MASK; rdesc->owner = THIS_MODULE; + if (di->vendor == HALO_HL7503) { + rdesc->n_voltages = HL7503_NVOLTAGES; + rdesc->vsel_mask = HL7503_VSEL_NSEL_MASK; + } else { + rdesc->n_voltages = FAN53555_NVOLTAGES; + rdesc->vsel_mask = VSEL_NSEL_MASK; + } + di->rdev = devm_regulator_register(di->dev, &di->desc, config); return PTR_ERR_OR_ZERO(di->rdev); } @@ -350,8 +412,54 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, static const struct regmap_config fan53555_regmap_config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0x05, }; +static int fan53555_parse_vsel_gpio(struct fan53555_device_info *di) +{ + struct device_node *np = di->dev->of_node; + unsigned int val; + int ret = 0, gpio; + enum of_gpio_flags flags; + + if (!of_find_property(np, "fcs,vsel-gpio", NULL)) + return ret; + + ret = regmap_read(di->regmap, di->sleep_reg, &val); + if (ret < 0) + return ret; + + ret = regmap_write(di->regmap, di->vol_reg, val); + if (ret < 0) + return ret; + + /* Get GPIO connected to vsel and set its output */ + gpio = of_get_named_gpio_flags(np, "fcs,vsel-gpio", 0, &flags); + if (!gpio_is_valid(gpio)) { + if (gpio != -EPROBE_DEFER) + dev_err(di->dev, "Could not get vsel, ret = %d\n", + gpio); + return gpio; + } 
+ + ret = devm_gpio_request(di->dev, gpio, "fan53555_vsel"); + if (ret) { + dev_err(di->dev, "Failed to obtain gpio %d ret = %d\n", + gpio, ret); + return ret; + } + + ret = gpio_direction_output(gpio, flags & OF_GPIO_ACTIVE_LOW ? 0 : 1); + if (ret) { + dev_err(di->dev, "Failed to set GPIO %d to: %s, ret = %d", + gpio, flags & OF_GPIO_ACTIVE_LOW ? + "GPIO_LOW" : "GPIO_HIGH", ret); + return ret; + } + + return ret; +} + static int fan53555_parse_dt(struct fan53555_device_info *di, struct fan53555_platform_data *pdata, const struct regulator_desc *desc) @@ -390,11 +498,46 @@ static const struct of_device_id fan53555_dt_ids[] = { }, { .compatible = "halo,hl7509", .data = (void *)HALO_HL7509, + }, { + .compatible = "halo,hl7503", + .data = (void *)HALO_HL7503, }, { } }; MODULE_DEVICE_TABLE(of, fan53555_dt_ids); + +static int get_reg(void *data, u64 *val) +{ + struct fan53555_device_info *di = data; + int rc; + unsigned int temp = 0; + + rc = regmap_read(di->regmap, di->peek_poke_address, &temp); + if (rc < 0) + dev_err(di->dev, "Couldn't read reg %x rc = %d\n", + di->peek_poke_address, rc); + else + *val = temp; + + return rc; +} + +static int set_reg(void *data, u64 val) +{ + struct fan53555_device_info *di = data; + int rc; + unsigned int temp = 0; + + temp = (unsigned int) val; + rc = regmap_write(di->regmap, di->peek_poke_address, temp); + if (rc < 0) + dev_err(di->dev, "Couldn't write 0x%02x to 0x%02x rc= %d\n", + temp, di->peek_poke_address, rc); + + return rc; +} +DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n"); + static int fan53555_regulator_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -466,6 +609,13 @@ static int fan53555_regulator_probe(struct i2c_client *client, dev_err(&client->dev, "Failed to setup device!\n"); return ret; } + + ret = fan53555_parse_vsel_gpio(di); + if (ret < 0) { + dev_err(&client->dev, "Failed to parse gpio!\n"); + return ret; + } + /* Register regulator */ config.dev = di->dev; 
config.init_data = di->regulator; @@ -476,8 +626,27 @@ static int fan53555_regulator_probe(struct i2c_client *client, ret = fan53555_regulator_register(di, &config); if (ret < 0) dev_err(&client->dev, "Failed to register regulator!\n"); - return ret; + di->debug_root = debugfs_create_dir("fan53555", NULL); + if (!di->debug_root) + dev_err(&client->dev, "Couldn't create debug dir\n"); + + if (di->debug_root) { + struct dentry *ent; + + ent = debugfs_create_x32("address", S_IFREG | 0644, + di->debug_root, + &(di->peek_poke_address)); + if (!ent) + dev_err(&client->dev, "Couldn't create address debug file\n"); + + ent = debugfs_create_file("data", S_IFREG | 0644, + di->debug_root, di, + &poke_poke_debug_ops); + if (!ent) + dev_err(&client->dev, "Couldn't create data debug file\n"); + } + return ret; } static const struct i2c_device_id fan53555_id[] = { diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c index 45c2b7f0e46ffbea75c67d39e236c5bbc53ba143..79be62e302ff5149ae99235877d1f706abed7f45 100644 --- a/drivers/regulator/qpnp-lcdb-regulator.c +++ b/drivers/regulator/qpnp-lcdb-regulator.c @@ -181,6 +181,7 @@ struct ldo_regulator { int soft_start_us; int vreg_ok_dbc_us; int voltage_mv; + int prev_voltage_mv; }; struct ncp_regulator { @@ -195,6 +196,7 @@ struct ncp_regulator { int soft_start_us; int vreg_ok_dbc_us; int voltage_mv; + int prev_voltage_mv; }; struct bst_params { @@ -228,6 +230,7 @@ struct qpnp_lcdb { bool lcdb_enabled; bool settings_saved; bool lcdb_sc_disable; + bool voltage_step_ramp; int sc_count; ktime_t sc_module_enable_time; @@ -249,6 +252,7 @@ enum lcdb_module { LDO, NCP, BST, + LDO_NCP, }; enum pfm_hysteresis { @@ -316,6 +320,12 @@ static u32 ncp_ilim_ma[] = { .valid = _valid \ } \ +static int qpnp_lcdb_set_voltage_step(struct qpnp_lcdb *lcdb, + int voltage_start_mv, u8 type); + +static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb, + int voltage_mv, u8 type); + static bool is_between(int value, int min, 
int max) { if (value < min || value > max) @@ -781,9 +791,13 @@ static int qpnp_lcdb_enable_wa(struct qpnp_lcdb *lcdb) return 0; } +#define VOLTAGE_START_MV 4500 +#define VOLTAGE_STEP_MV 500 + static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb) { int rc = 0, timeout, delay; + int voltage_mv = VOLTAGE_START_MV; u8 val = 0; if (lcdb->lcdb_enabled || lcdb->lcdb_sc_disable) { @@ -806,6 +820,22 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb) return rc; } + if (lcdb->voltage_step_ramp) { + if (lcdb->ldo.voltage_mv < VOLTAGE_START_MV) + voltage_mv = lcdb->ldo.voltage_mv; + + rc = qpnp_lcdb_set_voltage(lcdb, voltage_mv, LDO); + if (rc < 0) + return rc; + + if (lcdb->ncp.voltage_mv < VOLTAGE_START_MV) + voltage_mv = lcdb->ncp.voltage_mv; + + rc = qpnp_lcdb_set_voltage(lcdb, voltage_mv, NCP); + if (rc < 0) + return rc; + } + val = MODULE_EN_BIT; rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG, &val, 1); @@ -842,6 +872,17 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb) } lcdb->lcdb_enabled = true; + if (lcdb->voltage_step_ramp) { + usleep_range(10000, 11000); + rc = qpnp_lcdb_set_voltage_step(lcdb, + voltage_mv + VOLTAGE_STEP_MV, + LDO_NCP); + if (rc < 0) { + pr_err("Failed to set LCDB voltage rc=%d\n", rc); + return rc; + } + } + pr_debug("lcdb enabled successfully!\n"); return 0; @@ -1128,6 +1169,56 @@ static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb, return rc; } +static int qpnp_lcdb_set_voltage_step(struct qpnp_lcdb *lcdb, + int voltage_start_mv, u8 type) +{ + int i, ldo_voltage, ncp_voltage, voltage, rc = 0; + + for (i = voltage_start_mv; i <= (MAX_VOLTAGE_MV + VOLTAGE_STEP_MV); + i += VOLTAGE_STEP_MV) { + + ldo_voltage = (lcdb->ldo.voltage_mv < i) ? + lcdb->ldo.voltage_mv : i; + + ncp_voltage = (lcdb->ncp.voltage_mv < i) ? 
+ lcdb->ncp.voltage_mv : i; + if (type == LDO_NCP) { + rc = qpnp_lcdb_set_voltage(lcdb, ldo_voltage, LDO); + if (rc < 0) + return rc; + + rc = qpnp_lcdb_set_voltage(lcdb, ncp_voltage, NCP); + if (rc < 0) + return rc; + + pr_debug(" LDO voltage step %d NCP voltage step %d\n", + ldo_voltage, ncp_voltage); + + if ((i >= lcdb->ncp.voltage_mv) && + (i >= lcdb->ldo.voltage_mv)) + break; + } else { + voltage = (type == LDO) ? ldo_voltage : ncp_voltage; + rc = qpnp_lcdb_set_voltage(lcdb, voltage, type); + if (rc < 0) + return rc; + + pr_debug("%s voltage step %d\n", + (type == LDO) ? "LDO" : "NCP", voltage); + if ((type == LDO) && (i >= lcdb->ldo.voltage_mv)) + break; + + if ((type == NCP) && (i >= lcdb->ncp.voltage_mv)) + break; + + } + + usleep_range(1000, 1100); + } + + return rc; +} + static int qpnp_lcdb_get_voltage(struct qpnp_lcdb *lcdb, u32 *voltage_mv, u8 type) { @@ -1236,11 +1327,17 @@ static int qpnp_lcdb_ldo_regulator_set_voltage(struct regulator_dev *rdev, int rc = 0; struct qpnp_lcdb *lcdb = rdev_get_drvdata(rdev); - rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, LDO); + lcdb->ldo.voltage_mv = min_uV / 1000; + if (lcdb->voltage_step_ramp) + rc = qpnp_lcdb_set_voltage_step(lcdb, + lcdb->ldo.prev_voltage_mv + VOLTAGE_STEP_MV, LDO); + else + rc = qpnp_lcdb_set_voltage(lcdb, lcdb->ldo.voltage_mv, LDO); + if (rc < 0) pr_err("Failed to set LDO voltage rc=%c\n", rc); else - lcdb->ldo.voltage_mv = min_uV / 1000; + lcdb->ldo.prev_voltage_mv = lcdb->ldo.voltage_mv; return rc; } @@ -1309,11 +1406,17 @@ static int qpnp_lcdb_ncp_regulator_set_voltage(struct regulator_dev *rdev, int rc = 0; struct qpnp_lcdb *lcdb = rdev_get_drvdata(rdev); - rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, NCP); + lcdb->ncp.voltage_mv = min_uV / 1000; + if (lcdb->voltage_step_ramp) + rc = qpnp_lcdb_set_voltage_step(lcdb, + lcdb->ncp.prev_voltage_mv + VOLTAGE_STEP_MV, NCP); + else + rc = qpnp_lcdb_set_voltage(lcdb, lcdb->ncp.voltage_mv, NCP); + if (rc < 0) - pr_err("Failed to set LDO voltage 
rc=%c\n", rc); + pr_err("Failed to set NCP voltage rc=%d\n", rc); else - lcdb->ncp.voltage_mv = min_uV / 1000; + lcdb->ncp.prev_voltage_mv = lcdb->ncp.voltage_mv; return rc; } @@ -1675,6 +1778,8 @@ static int qpnp_lcdb_init_ldo(struct qpnp_lcdb *lcdb) return rc; } + lcdb->ldo.prev_voltage_mv = lcdb->ldo.voltage_mv; + rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_LDO_VREG_OK_CTL_REG, &val, 1); if (rc < 0) { @@ -1780,6 +1885,8 @@ static int qpnp_lcdb_init_ncp(struct qpnp_lcdb *lcdb) return rc; } + lcdb->ncp.prev_voltage_mv = lcdb->ncp.voltage_mv; + rc = qpnp_lcdb_read(lcdb, lcdb->base + LCDB_NCP_VREG_OK_CTL_REG, &val, 1); if (rc < 0) { @@ -2038,6 +2145,9 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb) if (lcdb->sc_irq < 0) pr_debug("sc irq is not defined\n"); + lcdb->voltage_step_ramp = + of_property_read_bool(node, "qcom,voltage-step-ramp"); + return rc; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index a6558609209dae2b6c437352b767082c55b4bedc..39cc5a69246aad53b82951fedca3a15fefcec213 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1614,7 +1614,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, ret = rpmsg_register_device(rpdev); if (ret) - goto free_rpdev; + goto rcid_remove; channel->rpdev = rpdev; } @@ -1622,9 +1622,6 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid, return 0; -free_rpdev: - CH_INFO(channel, "free_rpdev\n"); - kfree(rpdev); rcid_remove: CH_INFO(channel, "rcid_remove\n"); spin_lock_irqsave(&glink->idr_lock, flags); @@ -1799,15 +1796,35 @@ static int qcom_glink_create_chrdev(struct qcom_glink *glink) return rpmsg_chrdev_register_device(rpdev); } +static void qcom_glink_set_affinity(struct qcom_glink *glink, u32 *arr, + size_t size) +{ + struct cpumask cpumask; + int i; + + cpumask_clear(&cpumask); + for (i = 0; i < size; i++) { + if (arr[i] < num_possible_cpus()) + cpumask_set_cpu(arr[i], &cpumask); + } 
+ if (irq_set_affinity(glink->irq, &cpumask)) + dev_err(glink->dev, "failed to set irq affinity\n"); + if (sched_setaffinity(glink->task->pid, &cpumask)) + dev_err(glink->dev, "failed to set task affinity\n"); +} + + struct qcom_glink *qcom_glink_native_probe(struct device *dev, unsigned long features, struct qcom_glink_pipe *rx, struct qcom_glink_pipe *tx, bool intentless) { + struct qcom_glink *glink; + u32 *arr; + int size; int irq; int ret; - struct qcom_glink *glink; glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL); if (!glink) @@ -1866,6 +1883,18 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, glink->irq = irq; + size = of_property_count_u32_elems(dev->of_node, "cpu-affinity"); + if (size > 0) { + arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL); + if (!arr) + return ERR_PTR(-ENOMEM); + ret = of_property_read_u32_array(dev->of_node, "cpu-affinity", + arr, size); + if (!ret) + qcom_glink_set_affinity(glink, arr, size); + kfree(arr); + } + ret = qcom_glink_send_version(glink); if (ret) { dev_err(dev, "failed to send version %d\n", ret); diff --git a/drivers/rpmsg/qcom_glink_spi.c b/drivers/rpmsg/qcom_glink_spi.c index 9f3ef04083401b439d4f0a4e5de10cfc053b1458..ef80542546775691f7ea3950195194c3c6146fc1 100644 --- a/drivers/rpmsg/qcom_glink_spi.c +++ b/drivers/rpmsg/qcom_glink_spi.c @@ -1212,8 +1212,8 @@ static int glink_spi_send_data(struct glink_channel *channel, mutex_lock(&glink->tx_lock); } glink_spi_write(glink, data, intent->addr + intent->offset, chunk_size); - glink_spi_tx_write(glink, &req, sizeof(req), NULL, 0); intent->offset += chunk_size; + glink_spi_tx_write(glink, &req, sizeof(req), NULL, 0); mutex_unlock(&glink->tx_lock); return 0; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index dd9464920456fd550f8da9bab2e345ae843d1d15..ef22b275d0505b5bd9580182b2c67730f3181dc8 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -474,6 +474,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int 
privsize) shost->dma_boundary = 0xffffffff; shost->use_blk_mq = scsi_use_blk_mq; + shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq; device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 604a39dba5d0c7d0a0a54ed4267efeb09ae88b0e..5b4b7f9be2d749da9e8ca2e42b4f51196476d473 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1040,11 +1040,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); if (unlikely(!h->msix_vectors)) return; - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - c->Header.ReplyQueue = - raw_smp_processor_id() % h->nreply_queues; - else - c->Header.ReplyQueue = reply_queue % h->nreply_queues; + c->Header.ReplyQueue = reply_queue; } } @@ -1058,10 +1054,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; - else - cp->ReplyQueue = reply_queue % h->nreply_queues; + cp->ReplyQueue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit (bit 0) @@ -1082,10 +1075,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. 
*/ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) @@ -1104,10 +1094,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 @@ -1152,6 +1139,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); + + reply_queue = h->reply_map[raw_smp_processor_id()]; switch (c->cmd_type) { case CMD_IOACCEL1: set_ioaccel1_performant_mode(h, c, reply_queue); @@ -7244,6 +7233,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h) h->msix_vectors = 0; } +static void hpsa_setup_reply_map(struct ctlr_info *h) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < h->msix_vectors; queue++) { + mask = pci_irq_get_affinity(h->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + h->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + h->reply_map[cpu] = 0; +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
*/ @@ -7639,6 +7648,10 @@ static int hpsa_pci_init(struct ctlr_info *h) err = hpsa_interrupt_mode(h); if (err) goto clean1; + + /* setup mapping between CPU and reply queue */ + hpsa_setup_reply_map(h); + err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto clean2; /* intmode+region, pci */ @@ -8284,6 +8297,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, return wq; } +static void hpda_free_ctlr_info(struct ctlr_info *h) +{ + kfree(h->reply_map); + kfree(h); +} + +static struct ctlr_info *hpda_alloc_ctlr_info(void) +{ + struct ctlr_info *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return NULL; + + h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL); + if (!h->reply_map) { + kfree(h); + return NULL; + } + return h; +} + static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int dac, rc; @@ -8321,7 +8356,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * the driver. See comments in hpsa.h for more info. 
*/ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); - h = kzalloc(sizeof(*h), GFP_KERNEL); + h = hpda_alloc_ctlr_info(); if (!h) { dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; @@ -8726,7 +8761,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) h->lockup_detected = NULL; /* init_one 2 */ /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ - kfree(h); /* init_one 1 */ + hpda_free_ctlr_info(h); /* init_one 1 */ } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 018f980a701ce406e9edf0119240c39df47b9858..fb9f5e7f8209447771d07016bca7924774b143af 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -158,6 +158,7 @@ struct bmic_controller_parameters { #pragma pack() struct ctlr_info { + unsigned int *reply_map; int ctlr; char devname[8]; char *product_name; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 63bea6a65d51a11d81783ee61994da758b352b86..8d579bf0fc81b5d283aabfd69ac254ace9a68a51 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2128,34 +2128,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) req_cnt = 1; handle = 0; - if (!sp) - goto skip_cmd_array; - - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) { - ql_log(ql_log_warn, vha, 0x700b, - "No room on outstanding cmd array.\n"); - goto queuing_error; - } - - /* Prep command array. */ - req->current_outstanding_cmd = handle; - req->outstanding_cmds[handle] = sp; - sp->handle = handle; - - /* Adjust entry-counts as needed. 
*/ - if (sp->type != SRB_SCSI_CMD) + if (sp && (sp->type != SRB_SCSI_CMD)) { + /* Adjust entry-counts as needed. */ req_cnt = sp->iocbs; + } -skip_cmd_array: /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) @@ -2179,6 +2156,28 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) if (req->cnt < req_cnt + 2) goto queuing_error; + if (sp) { + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x700b, + "No room on outstanding cmd array.\n"); + goto queuing_error; + } + + /* Prep command array. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + } + /* Prep packet */ req->cnt -= req_cnt; pkt = req->ring_ptr; @@ -2191,6 +2190,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) pkt->handle = handle; } + return pkt; + queuing_error: qpair->tgt_counters.num_alloc_iocb_failed++; return pkt; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 3f3cb72e0c0cdab6a76ea8c4057229f76924899c..d0389b20574d0f778e2bfd95b07e80458970dbd5 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) static int sr_block_open(struct block_device *bdev, fmode_t mode) { struct scsi_cd *cd; + struct scsi_device *sdev; int ret = -ENXIO; + cd = scsi_cd_get(bdev->bd_disk); + if (!cd) + goto out; + + sdev = cd->device; + scsi_autopm_get_device(sdev); check_disk_change(bdev); mutex_lock(&sr_mutex); - cd = scsi_cd_get(bdev->bd_disk); - if (cd) { - ret = cdrom_open(&cd->cdi, bdev, mode); - if (ret) - scsi_cd_put(cd); - } + ret = cdrom_open(&cd->cdi, bdev, mode); mutex_unlock(&sr_mutex); + + 
scsi_autopm_put_device(sdev); + if (ret) + scsi_cd_put(cd); + +out: return ret; } @@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (ret) goto out; + scsi_autopm_get_device(sdev); + /* * Send SCSI addressing ioctls directly to mid level, send other * ioctls to cdrom/block level. @@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: ret = scsi_ioctl(sdev, cmd, argp); - goto out; + goto put; } ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); if (ret != -ENOSYS) - goto out; + goto put; ret = scsi_ioctl(sdev, cmd, argp); +put: + scsi_autopm_put_device(sdev); + out: mutex_unlock(&sr_mutex); return ret; diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c index 7e7360aacf88511c4959039028d6a8bacf52c158..4e477ab886244d606e3d1e35a357a718ca2fe167 100644 --- a/drivers/scsi/ufs/ufs-qcom-ice.c +++ b/drivers/scsi/ufs/ufs-qcom-ice.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -368,8 +368,13 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, return -EINVAL; } - if (qcom_host->hw_ver.major == 0x3) { - /* nothing to do here for version 0x3, exit silently */ + if (qcom_host->hw_ver.major >= 0x3) { + /* + * ICE 3.0 crypto sequences were changed, + * CTRL_INFO register no longer exists + * and doesn't need to be configured. + * The configuration is done via utrd. 
+ */ return 0; } diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 48676f7693e016ca719ecf53dd9fb708a6a2e559..15e0eb99bdacf85ae7ef6ca3e7410cafc4539614 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -937,17 +937,16 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba, req = lrbp->cmd->request; else return 0; - /* - * Right now ICE do not support variable dun but can be - * taken as future enhancement - * if (bio_dun(req->bio)) { - * dun @bio can be split, so we have to adjust offset - * *dun = bio_dun(req->bio); - * } else - */ + + /* Use request LBA or given dun as the DUN value */ if (req->bio) { - *dun = req->bio->bi_iter.bi_sector; - *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; + if (bio_dun(req->bio)) { + /* dun @bio can be split, so we have to adjust offset */ + *dun = bio_dun(req->bio); + } else { + *dun = req->bio->bi_iter.bi_sector; + *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB; + } } ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 603829048be5abf67a1bd9d11e7d6ecef6fc7e19..6d529fe02aecc0118a70942a612de35029c3c80b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7895,7 +7895,8 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) * to recover after multiple retries. */ if (err && ufshcd_is_embedded_dev(hba)) - WARN_ON(1); + BUG(); + /* * After reset the door-bell might be cleared, complete * outstanding requests in s/w here. diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 7c28e8d4955a965805cdde3d2ddb07a7f8c5d6ea..54e3a0f6844c4c81692fe9aaa568efe1f2215866 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -91,9 +91,6 @@ struct virtio_scsi_vq { struct virtio_scsi_target_state { seqcount_t tgt_seq; - /* Count of outstanding requests. */ - atomic_t reqs; - /* Currently active virtqueue for requests sent to this target. 
*/ struct virtio_scsi_vq *req_vq; }; @@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", @@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) } sc->scsi_done(sc); - - atomic_dec(&tgt->reqs); } static void virtscsi_vq_done(struct virtio_scsi *vscsi, @@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sh); - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; - atomic_inc(&tgt->reqs); return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); } @@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, return &vscsi->req_vqs[hwq]; } -static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, - struct virtio_scsi_target_state *tgt) -{ - struct virtio_scsi_vq *vq; - unsigned long flags; - u32 queue_num; - - local_irq_save(flags); - if (atomic_inc_return(&tgt->reqs) > 1) { - unsigned long seq; - - do { - seq = read_seqcount_begin(&tgt->tgt_seq); - vq = tgt->req_vq; - } while (read_seqcount_retry(&tgt->tgt_seq, seq)); - } else { - /* no writes can be concurrent because of atomic_t */ - write_seqcount_begin(&tgt->tgt_seq); - - /* keep previous req_vq if a reader just arrived */ - if (unlikely(atomic_read(&tgt->reqs) > 1)) { - vq = tgt->req_vq; - goto unlock; - } - - queue_num = smp_processor_id(); - while (unlikely(queue_num >= vscsi->num_queues)) - queue_num -= vscsi->num_queues; - tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; - unlock: - write_seqcount_end(&tgt->tgt_seq); - } - local_irq_restore(flags); - - return vq; -} - static int 
virtscsi_queuecommand_multi(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sh); - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; - struct virtio_scsi_vq *req_vq; - - if (shost_use_blk_mq(sh)) - req_vq = virtscsi_pick_vq_mq(vscsi, sc); - else - req_vq = virtscsi_pick_vq(vscsi, tgt); + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); return virtscsi_queuecommand(vscsi, req_vq, sc); } @@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget) return -ENOMEM; seqcount_init(&tgt->tgt_seq); - atomic_set(&tgt->reqs, 0); tgt->req_vq = &vscsi->req_vqs[0]; starget->hostdata = tgt; @@ -823,6 +768,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, .track_queue_depth = 1, + .force_blk_mq = 1, }; static struct scsi_host_template virtscsi_host_template_multi = { @@ -844,6 +790,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .target_destroy = virtscsi_target_destroy, .map_queues = virtscsi_map_queues, .track_queue_depth = 1, + .force_blk_mq = 1, }; #define virtscsi_config_get(vdev, fld) \ diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c index d7387670e4ef700656643546a5565e5b0c53ad42..dfdbd8e8df698be5cc4b2e8e64bb016bfebadd85 100644 --- a/drivers/sensors/sensors_ssc.c +++ b/drivers/sensors/sensors_ssc.c @@ -32,6 +32,7 @@ #define IMAGE_LOAD_CMD 1 #define IMAGE_UNLOAD_CMD 0 +#define SSR_RESET_CMD 1 #define CLASS_NAME "ssc" #define DRV_NAME "sensors" #define DRV_VERSION "2.00" @@ -53,6 +54,10 @@ static ssize_t slpi_boot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); +static ssize_t slpi_ssr_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count); + struct slpi_loader_private { void *pil_h; struct kobject *boot_slpi_obj; @@ -62,8 +67,12 @@ struct 
slpi_loader_private { static struct kobj_attribute slpi_boot_attribute = __ATTR(boot, 0220, NULL, slpi_boot_store); +static struct kobj_attribute slpi_ssr_attribute = + __ATTR(ssr, 0220, NULL, slpi_ssr_store); + static struct attribute *attrs[] = { &slpi_boot_attribute.attr, + &slpi_ssr_attribute.attr, NULL, }; @@ -138,6 +147,44 @@ static void slpi_loader_unload(struct platform_device *pdev) } } +static ssize_t slpi_ssr_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + int ssr_cmd = 0; + struct subsys_device *sns_dev = NULL; + struct platform_device *pdev = slpi_private; + struct slpi_loader_private *priv = NULL; + + pr_debug("%s: going to call slpi_ssr\n", __func__); + + if (kstrtoint(buf, 10, &ssr_cmd) < 0) + return -EINVAL; + + if (ssr_cmd != SSR_RESET_CMD) + return -EINVAL; + + priv = platform_get_drvdata(pdev); + if (!priv) + return -EINVAL; + + sns_dev = (struct subsys_device *)priv->pil_h; + if (!sns_dev) + return -EINVAL; + + dev_err(&pdev->dev, "Something went wrong with SLPI, restarting\n"); + + /* subsystem_restart_dev has worker queue to handle */ + if (subsystem_restart_dev(sns_dev) != 0) { + dev_err(&pdev->dev, "subsystem_restart_dev failed\n"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "SLPI restarted\n"); + return count; +} + static ssize_t slpi_boot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 314a163faa262e2dfa96a4c844145f05f00936b0..3647c0e2af36f71bbc4ca14c5da62be9e7d5ce2d 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -833,4 +833,11 @@ config QCOM_CX_IPEAK bit in tcsr register if it is going to cross its own threshold. If all clients are going to cross their thresholds then Cx ipeak hw module will raise an interrupt to cDSP block to throttle cDSP fmax. 
+ +config QCOM_AOP_DDR_MESSAGING + bool "Send messages to AOP about the DDR frequency during reboot" + help + This driver sends messages to the AOP to adjust the DDR frequency when + the device is rebooting, to ensure that the device is powered off + cleanly. endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 05febcc529e757a7e1df4efdaff13252a4469fbd..c7f741fbf9137642c054b81fe915a50d524ed233 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -93,3 +93,4 @@ obj-$(CONFIG_QCOM_SMP2P_SLEEPSTATE) += smp2p_sleepstate.o obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o +obj-$(CONFIG_QCOM_AOP_DDR_MESSAGING) += aop_ddr_msgs.o diff --git a/drivers/soc/qcom/aop_ddr_msgs.c b/drivers/soc/qcom/aop_ddr_msgs.c new file mode 100644 index 0000000000000000000000000000000000000000..b7be6974fb16ce909d6282d532a0a2db60773d0b --- /dev/null +++ b/drivers/soc/qcom/aop_ddr_msgs.c @@ -0,0 +1,123 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_AOP_MSG_LEN 96 +#define AOP_DDR_TARG_FREQ_MHZ 300 + +struct mbox_data { + struct mbox_client cl; + struct mbox_chan *mbox; + struct notifier_block reboot_notif_blk; +}; + +static void send_aop_ddr_freq_msg(struct mbox_data *aop_mbox, int freq_mhz) +{ + struct qmp_pkt pkt; + char mbox_msg[MAX_AOP_MSG_LEN + 1] = {0}; + int rc; + + scnprintf(mbox_msg, MAX_AOP_MSG_LEN, + "{class: ddr, res: fixed, val: %d}", freq_mhz); + pkt.size = MAX_AOP_MSG_LEN; + pkt.data = mbox_msg; + + rc = mbox_send_message(aop_mbox->mbox, &pkt); + + if (rc < 0) + pr_err("Failed to send AOP DDR freq msg: %d rc: %d\n", freq_mhz, + rc); +} + +static int aop_ddr_freq_msg_handler(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct mbox_data *aop_mbox = container_of(this, struct mbox_data, + reboot_notif_blk); + + if (event == SYS_HALT || event == SYS_POWER_OFF) + send_aop_ddr_freq_msg(aop_mbox, AOP_DDR_TARG_FREQ_MHZ); + + return NOTIFY_DONE; +} + +static int aop_ddr_msgs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int rc; + struct mbox_data *aop_mbox = devm_kzalloc(dev, sizeof(*aop_mbox), + GFP_KERNEL); + + if (!aop_mbox) + return -ENOMEM; + + aop_mbox->cl.dev = dev; + aop_mbox->cl.tx_block = true; + aop_mbox->cl.tx_tout = 1000; + aop_mbox->cl.knows_txdone = false; + aop_mbox->mbox = mbox_request_channel(&aop_mbox->cl, 0); + if (IS_ERR(aop_mbox->mbox)) { + rc = PTR_ERR(aop_mbox->mbox); + pr_err("Failed to get mailbox channel rc: %d\n", rc); + return rc; + } + + aop_mbox->reboot_notif_blk.notifier_call = aop_ddr_freq_msg_handler; + platform_set_drvdata(pdev, aop_mbox); + rc = register_reboot_notifier(&aop_mbox->reboot_notif_blk); + if (rc < 0) { + pr_err("Failed to register to the reboot notifier rc: %d\n", + rc); + mbox_free_channel(aop_mbox->mbox); + platform_set_drvdata(pdev, NULL); + } + return rc; +} + +static int 
aop_ddr_msgs_remove(struct platform_device *pdev) +{ + struct mbox_data *aop_mbox = platform_get_drvdata(pdev); + + mbox_free_channel(aop_mbox->mbox); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id of_aop_ddr_match_tbl[] = { + { .compatible = "qcom,aop-ddr-msgs", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_aop_ddr_match_tbl); + +static struct platform_driver aop_ddr_msgs_driver = { + .probe = aop_ddr_msgs_probe, + .remove = aop_ddr_msgs_remove, + .driver = { + .name = "aop-ddr-msgs", + .of_match_table = of_match_ptr(of_aop_ddr_match_tbl), + }, +}; +module_platform_driver(aop_ddr_msgs_driver); + +MODULE_DESCRIPTION("Qualcomm Technologies Inc AOP DDR Messaging driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c index 1fcf9609a486332aa99a1de126ab6adfb571b46a..95d7443334a1b181d1d882b10f89922bfc6229f2 100644 --- a/drivers/soc/qcom/dfc_qmi.c +++ b/drivers/soc/qcom/dfc_qmi.c @@ -20,6 +20,8 @@ #define CREATE_TRACE_POINTS #include +#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6) + #define DFC_MAX_BEARERS_V01 16 #define DFC_MAX_QOS_ID_V01 2 @@ -79,7 +81,7 @@ static void dfc_do_burst_flow_control(struct work_struct *work); #define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7 #define QMI_DFC_FLOW_STATUS_IND_V01 0x0022 -#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 424 +#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540 struct dfc_bind_client_req_msg_v01 { u8 ep_id_valid; @@ -234,12 +236,74 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { }, }; +struct dfc_ancillary_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 reserved; +}; + +static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + subs_id), + .ei_array 
= NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + mux_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + bearer_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + reserved), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + struct dfc_flow_status_ind_msg_v01 { u8 flow_status_valid; u8 flow_status_len; struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; u8 eod_ack_reqd_valid; u8 eod_ack_reqd; + u8 ancillary_info_valid; + u8 ancillary_info_len; + struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01]; }; struct dfc_svc_ind { @@ -400,6 +464,40 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { eod_ack_reqd), .ei_array = NULL, }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + ancillary_info_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + ancillary_info_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_ancillary_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + 
ancillary_info), + .ei_array = dfc_ancillary_info_type_v01_ei, + }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, @@ -587,11 +685,18 @@ static int dfc_bearer_flow_ctl(struct net_device *dev, itm = list_entry(p, struct rmnet_flow_map, list); if (itm->bearer_id == bearer->bearer_id) { + /* + * Do not flow disable ancillary q if ancillary is true + */ + if (bearer->ancillary && enable == 0 && + DFC_IS_ANCILLARY(itm->ip_type)) + continue; + qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle, enable); - trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id, - bearer->grant_size, qlen, - itm->tcm_handle, enable); + trace_dfc_qmi_tc(dev->name, itm->bearer_id, + itm->flow_id, bearer->grant_size, + qlen, itm->tcm_handle, enable); rc++; } } @@ -605,7 +710,7 @@ static int dfc_bearer_flow_ctl(struct net_device *dev, } static int dfc_all_bearer_flow_ctl(struct net_device *dev, - struct qos_info *qos, u8 ack_req, + struct qos_info *qos, u8 ack_req, u32 ancillary, struct dfc_flow_status_info_type_v01 *fc_info) { struct list_head *p; @@ -621,6 +726,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev, qmi_rmnet_grant_per(bearer_itm->grant_size); bearer_itm->seq = fc_info->seq_num; bearer_itm->ack_req = ack_req; + bearer_itm->ancillary = ancillary; } enable = fc_info->num_bytes > 0 ? 
1 : 0; @@ -630,7 +736,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev, else netif_tx_stop_all_queues(dev); - trace_dfc_qmi_tc(0xFF, 0, fc_info->num_bytes, 0, 0, enable); + trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable); if (enable == 0 && ack_req) dfc_send_ack(dev, fc_info->bearer_id, @@ -641,7 +747,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev, } static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, - u8 ack_req, + u8 ack_req, u32 ancillary, struct dfc_flow_status_info_type_v01 *fc_info) { struct rmnet_bearer_map *itm = NULL; @@ -659,6 +765,7 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size); itm->seq = fc_info->seq_num; itm->ack_req = ack_req; + itm->ancillary = ancillary; if (action != -1) rc = dfc_bearer_flow_ctl(dev, itm, qos); @@ -672,13 +779,14 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, static void dfc_do_burst_flow_control(struct work_struct *work) { struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work; - struct dfc_flow_status_ind_msg_v01 *ind = - (struct dfc_flow_status_ind_msg_v01 *)&svc_ind->dfc_info; + struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info; struct net_device *dev; struct qos_info *qos; struct dfc_flow_status_info_type_v01 *flow_status; + struct dfc_ancillary_info_type_v01 *ai; u8 ack_req = ind->eod_ack_reqd_valid ? 
ind->eod_ack_reqd : 0; - int i; + u32 ancillary; + int i, j; if (unlikely(svc_ind->data->restart_state)) { kfree(svc_ind); @@ -689,12 +797,27 @@ static void dfc_do_burst_flow_control(struct work_struct *work) for (i = 0; i < ind->flow_status_len; i++) { flow_status = &ind->flow_status[i]; + + ancillary = 0; + if (ind->ancillary_info_valid) { + for (j = 0; j < ind->ancillary_info_len; j++) { + ai = &ind->ancillary_info[j]; + if (ai->mux_id == flow_status->mux_id && + ai->bearer_id == flow_status->bearer_id) { + ancillary = ai->reserved; + break; + } + } + } + trace_dfc_flow_ind(svc_ind->data->index, i, flow_status->mux_id, flow_status->bearer_id, flow_status->num_bytes, flow_status->seq_num, - ack_req); + ack_req, + ancillary); + dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port, flow_status->mux_id); if (!dev) @@ -708,9 +831,10 @@ static void dfc_do_burst_flow_control(struct work_struct *work) if (unlikely(flow_status->bearer_id == 0xFF)) dfc_all_bearer_flow_ctl( - dev, qos, ack_req, flow_status); + dev, qos, ack_req, ancillary, flow_status); else - dfc_update_fc_map(dev, qos, ack_req, flow_status); + dfc_update_fc_map( + dev, qos, ack_req, ancillary, flow_status); spin_unlock_bh(&qos->qos_lock); } @@ -892,7 +1016,7 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, struct rmnet_flow_map *itm; u32 start_grant; - spin_lock(&qos->qos_lock); + spin_lock_bh(&qos->qos_lock); itm = qmi_rmnet_get_flow_map(qos, mark, ip_type); if (unlikely(!itm)) @@ -902,7 +1026,8 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, if (unlikely(!bearer)) goto out; - trace_dfc_flow_check(bearer->bearer_id, len, bearer->grant_size); + trace_dfc_flow_check(dev->name, bearer->bearer_id, + len, bearer->grant_size); if (!bearer->grant_size) goto out; @@ -924,5 +1049,17 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, dfc_bearer_flow_ctl(dev, bearer, qos); out: - spin_unlock(&qos->qos_lock); + spin_unlock_bh(&qos->qos_lock); 
+} + +void dfc_qmi_wq_flush(struct qmi_info *qmi) +{ + struct dfc_qmi_data *dfc_data; + int i; + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client); + if (dfc_data) + flush_workqueue(dfc_data->dfc_wq); + } } diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c index c415881a25193720d3c62a5439acf40fa673f5f1..8d4b37d5ec878f04a66389e52e4b2f81e86f988f 100644 --- a/drivers/soc/qcom/fsa4480-i2c.c +++ b/drivers/soc/qcom/fsa4480-i2c.c @@ -38,7 +38,6 @@ struct fsa4480_priv { struct device *dev; struct power_supply *usb_psy; struct notifier_block psy_nb; - bool usbc_force_pr_mode; atomic_t usbc_mode; struct work_struct usbc_analog_work; struct blocking_notifier_head fsa4480_notifier; @@ -270,23 +269,11 @@ static int fsa4480_usbc_analog_setup_switches (struct fsa4480_priv *fsa_priv, bool active) { int rc = 0; - union power_supply_propval pval; dev_dbg(fsa_priv->dev, "%s: setting GPIOs active = %d\n", __func__, active); - memset(&pval, 0, sizeof(pval)); - if (active) { - pval.intval = POWER_SUPPLY_TYPEC_PR_SOURCE; - if (power_supply_set_property(fsa_priv->usb_psy, - POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval)) - dev_err(fsa_priv->dev, - "%s: force PR_SOURCE mode unsuccessful\n", - __func__); - else - fsa_priv->usbc_force_pr_mode = true; - /* activate switches */ fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F); @@ -300,17 +287,6 @@ static int fsa4480_usbc_analog_setup_switches /* deactivate switches */ fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98); - - if (fsa_priv->usbc_force_pr_mode) { - pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL; - if (power_supply_set_property(fsa_priv->usb_psy, - POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &pval)) - dev_err(fsa_priv->dev, - "%s: force PR_DUAL unsuccessful\n", - __func__); - - fsa_priv->usbc_force_pr_mode = false; - } } return rc; diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 
5c354fdb0df1b3b65b5cbd0f942eef250a37d29a..a4356a45a2af1384e069241de9bd15586f7bd7b2 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -36,7 +36,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -653,10 +654,17 @@ static irqreturn_t fw_error_fatal_handler(int irq, void *ctx) static irqreturn_t fw_crash_indication_handler(int irq, void *ctx) { struct icnss_priv *priv = ctx; + struct icnss_uevent_fw_down_data fw_down_data = {0}; icnss_pr_err("Received early crash indication from FW\n"); if (priv) { + if (test_bit(ICNSS_FW_READY, &priv->state) && + !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state)) { + fw_down_data.crashed = true; + icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, + &fw_down_data); + } set_bit(ICNSS_FW_DOWN, &priv->state); icnss_ignore_fw_timeout(true); } @@ -667,61 +675,84 @@ static irqreturn_t fw_crash_indication_handler(int irq, void *ctx) return IRQ_HANDLED; } -static void register_fw_error_notifications(struct icnss_priv *priv) +static void register_fw_error_notifications(struct device *dev) { - int gpio, irq, ret; + struct icnss_priv *priv = dev_get_drvdata(dev); + struct device_node *dev_node; + int irq = 0, ret = 0; - if (!of_find_property(priv->pdev->dev.of_node, - "qcom,gpio-force-fatal-error", NULL)) { - icnss_pr_dbg("Error fatal smp2p handler not registered\n"); + if (!priv) return; - } - gpio = of_get_named_gpio(priv->pdev->dev.of_node, - "qcom,gpio-force-fatal-error", 0); - if (!gpio_is_valid(gpio)) { - icnss_pr_err("Invalid GPIO for error fatal smp2p %d\n", gpio); + + dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in"); + if (!dev_node) { + icnss_pr_err("Failed to get smp2p node for force-fatal-error\n"); return; } - irq = gpio_to_irq(gpio); - if (irq < 0) { - icnss_pr_err("Invalid IRQ for error fatal smp2p %u\n", irq); - return; + + icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name); + + if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) { + ret 
= irq = of_irq_get_byname(dev_node, + "qcom,smp2p-force-fatal-error"); + if (ret < 0) { + icnss_pr_err("Unable to get force-fatal-error irq %d\n", + irq); + return; + } } - ret = request_irq(irq, fw_error_fatal_handler, - IRQF_TRIGGER_RISING, "wlanfw-err", priv); + + ret = devm_request_threaded_irq(dev, irq, NULL, fw_error_fatal_handler, + IRQF_TRIGGER_RISING, "wlanfw-err", + priv); if (ret < 0) { - icnss_pr_err("Unable to register for error fatal IRQ handler %d", - irq); + icnss_pr_err("Unable to register for error fatal IRQ handler %d ret = %d", + irq, ret); return; } - icnss_pr_dbg("FW force error fatal handler registered\n"); + icnss_pr_dbg("FW force error fatal handler registered irq = %d\n", irq); + priv->fw_error_fatal_irq = irq; +} - if (!of_find_property(priv->pdev->dev.of_node, - "qcom,gpio-early-crash-ind", NULL)) { - icnss_pr_dbg("FW early crash indication handler not registered\n"); +static void register_early_crash_notifications(struct device *dev) +{ + struct icnss_priv *priv = dev_get_drvdata(dev); + struct device_node *dev_node; + int irq = 0, ret = 0; + + if (!priv) return; - } - gpio = of_get_named_gpio(priv->pdev->dev.of_node, - "qcom,gpio-early-crash-ind", 0); - if (!gpio_is_valid(gpio)) { - icnss_pr_err("Invalid GPIO for early crash indication %d\n", - gpio); + + dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in"); + if (!dev_node) { + icnss_pr_err("Failed to get smp2p node for early-crash-ind\n"); return; } - irq = gpio_to_irq(gpio); - if (irq < 0) { - icnss_pr_err("Invalid IRQ for early crash indication %u\n", - irq); - return; + + icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name); + + if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) { + ret = irq = of_irq_get_byname(dev_node, + "qcom,smp2p-early-crash-ind"); + if (ret < 0) { + icnss_pr_err("Unable to get early-crash-ind irq %d\n", + irq); + return; + } } - ret = request_irq(irq, fw_crash_indication_handler, - IRQF_TRIGGER_RISING, "wlanfw-early-crash-ind", 
priv); + + ret = devm_request_threaded_irq(dev, irq, NULL, + fw_crash_indication_handler, + IRQF_TRIGGER_RISING, + "wlanfw-early-crash-ind", + priv); if (ret < 0) { - icnss_pr_err("Unable to register for early crash indication IRQ handler %d", - irq); + icnss_pr_err("Unable to register for early crash indication IRQ handler %d ret = %d", + irq, ret); return; } - icnss_pr_dbg("FW crash indication handler registered\n"); + icnss_pr_dbg("FW crash indication handler registered irq = %d\n", irq); + priv->fw_early_crash_irq = irq; } int icnss_call_driver_uevent(struct icnss_priv *priv, @@ -794,7 +825,11 @@ static int icnss_driver_event_server_arrive(void *data) wlfw_dynamic_feature_mask_send_sync_msg(penv, dynamic_feature_mask); - register_fw_error_notifications(penv); + if (!penv->fw_error_fatal_irq) + register_fw_error_notifications(&penv->pdev->dev); + + if (!penv->fw_early_crash_irq) + register_early_crash_notifications(&penv->pdev->dev); return ret; @@ -908,8 +943,8 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv) ret = priv->ops->reinit(&priv->pdev->dev); if (ret < 0) { - icnss_pr_err("Driver reinit failed: %d, state: 0x%lx\n", - ret, priv->state); + icnss_fatal_err("Driver reinit failed: %d, state: 0x%lx\n", + ret, priv->state); if (!priv->allow_recursive_recovery) ICNSS_ASSERT(false); goto out_power_off; @@ -1065,8 +1100,8 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv, } if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) { - icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n", - event_data->crashed, priv->state); + icnss_fatal_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n", + event_data->crashed, priv->state); if (!priv->allow_recursive_recovery) ICNSS_ASSERT(0); goto out; @@ -1732,6 +1767,8 @@ EXPORT_SYMBOL(icnss_disable_irq); int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info) { + char *fw_build_timestamp = NULL; + if (!penv || !dev) 
{ icnss_pr_err("Platform driver not initialized\n"); return -EINVAL; @@ -1744,6 +1781,8 @@ int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info) info->board_id = penv->board_id; info->soc_id = penv->soc_id; info->fw_version = penv->fw_version_info.fw_version; + fw_build_timestamp = penv->fw_version_info.fw_build_timestamp; + fw_build_timestamp[WLFW_MAX_TIMESTAMP_LEN] = '\0'; strlcpy(info->fw_build_timestamp, penv->fw_version_info.fw_build_timestamp, WLFW_MAX_TIMESTAMP_LEN + 1); diff --git a/drivers/soc/qcom/icnss_private.h b/drivers/soc/qcom/icnss_private.h index 1464eb1bbe5707a98aaeba39bc5f556000cc351f..7fc3c9cbf9f076024d2f3efa5216cbf9248dc0f1 100644 --- a/drivers/soc/qcom/icnss_private.h +++ b/drivers/soc/qcom/icnss_private.h @@ -88,6 +88,9 @@ #define ICNSS_ASSERT(_condition) do { } while (0) #endif +#define icnss_fatal_err(_fmt, ...) \ + icnss_pr_err("fatal: "_fmt, ##__VA_ARGS__) + enum icnss_debug_quirks { HW_ALWAYS_ON, HW_DEBUG_ENABLE, @@ -348,6 +351,8 @@ struct icnss_priv { u16 line_number; struct mutex dev_lock; bool is_hyp_disabled; + uint32_t fw_error_fatal_irq; + uint32_t fw_early_crash_irq; char function_name[WLFW_FUNCTION_NAME_LEN + 1]; }; diff --git a/drivers/soc/qcom/llcc-sdmmagpie.c b/drivers/soc/qcom/llcc-sdmmagpie.c index 4f7397e839574dee821cff0b4539868e3f62e016..04a678315b70b1c0d91875f8d77edf96f8392e61 100644 --- a/drivers/soc/qcom/llcc-sdmmagpie.c +++ b/drivers/soc/qcom/llcc-sdmmagpie.c @@ -58,10 +58,11 @@ static struct llcc_slice_config sdmmagpie_data[] = { SCT_ENTRY("cpuss", 1, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1), - SCT_ENTRY("modem", 8, 8, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("modemhw", 9, 9, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("gpuhtw", 11, 11, 128, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("gpu", 12, 12, 384, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modem", 8, 8, 512, 0, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhw", 9, 9, 512, 0, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("gpuhtw", 11, 
11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("gpu", 12, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("npu", 23, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), }; static int sdmmagpie_qcom_llcc_probe(struct platform_device *pdev) diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index ccf95258448101206ba80b65d6281084f9ebb160..b14be2ab7645db216b25cc3c56944328507b66b8 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -145,17 +145,8 @@ struct llcc_slice_desc *llcc_slice_getd(struct device *dev, const char *name) const char *slice_name; struct property *prop; - if (!np) { - dev_err(dev, "%s() currently only supports DT\n", __func__); + if (!np || !of_get_property(np, "cache-slice-names", NULL)) return ERR_PTR(-ENOENT); - } - - if (!of_get_property(np, "cache-slice-names", NULL)) { - dev_err(dev, - "%s() requires a \"cache-slice-names\" property\n", - __func__); - return ERR_PTR(-ENOENT); - } of_property_for_each_string(np, "cache-slice-names", prop, slice_name) { if (!strcmp(name, slice_name)) diff --git a/drivers/soc/qcom/llcc-sm6150.c b/drivers/soc/qcom/llcc-sm6150.c index b6dfbd3d55d42c0b98f93412fa3b65de8aabbb9c..12b2bb3453fa6bef34329e41d7acadc002085156 100644 --- a/drivers/soc/qcom/llcc-sm6150.c +++ b/drivers/soc/qcom/llcc-sm6150.c @@ -58,7 +58,9 @@ static struct llcc_slice_config sm6150_data[] = { SCT_ENTRY("cpuss", 1, 1, 256, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1), - SCT_ENTRY("modem", 8, 8, 256, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modem", 8, 8, 256, 0, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("gpuhtw", 11, 11, 128, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("gpu", 12, 12, 128, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("mmuhwt", 13, 13, 256, 1, 0, 0xF, 0x0, 0, 0, 0, 0, 1), }; diff --git a/drivers/soc/qcom/llcc-sm8150.c b/drivers/soc/qcom/llcc-sm8150.c index 63e98ceb3489ede33f0606fd334e5790e5a00787..e8c9a528eaae48cc3baa71d091399b44a4dab0a6 100644 --- a/drivers/soc/qcom/llcc-sm8150.c +++ 
b/drivers/soc/qcom/llcc-sm8150.c @@ -62,9 +62,9 @@ static struct llcc_slice_config sm8150_data[] = { SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("voice", 5, 5, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("audio", 6, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("modemhp_grow", 7, 7, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhp_grow", 7, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0), SCT_ENTRY("modem", 8, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("modemhw", 9, 9, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhw", 9, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("compute", 10, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpuhtw", 11, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpu", 12, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), @@ -77,8 +77,8 @@ static struct llcc_slice_config sm8150_data[] = { SCT_ENTRY("npu", 23, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("wlan_hw", 24, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modem_vpe", 29, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("ap_tcm", 30, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0), - SCT_ENTRY("write_cache", 31, 31, 128, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 0), + SCT_ENTRY("ap_tcm", 30, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0), + SCT_ENTRY("write_cache", 31, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0), }; static int sm8150_qcom_llcc_probe(struct platform_device *pdev) diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c index 4b0e0882ef82454cb4979551771e8c6f6bb6e34d..7cd6850395849a6500116c0bc3e4d73ea483e4fa 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.c +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -371,14 +371,12 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code, memblock[i].hyp_mapping = 0; } } - if (memblock[i].client_id == 1) { - /* - * Check if the client id - * is of diag so that free - * the memory 
region of - * client's size + guard - * bytes of 4K. - */ + if (memblock[i].guard_band) { + /* + * Check if the client required guard band + * support so the memory region of client's + * size + guard bytes of 4K can be freed. + */ size += MEMSHARE_GUARD_BYTES; } dma_free_attrs(memsh_drv->dev, @@ -463,7 +461,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle, } if (!memblock[client_id].allotted) { - if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0) + if (memblock[client_id].guard_band && alloc_req->num_bytes > 0) size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES; else size = alloc_req->num_bytes; @@ -555,14 +553,12 @@ static void handle_free_generic_req(struct qmi_handle *handle, __func__, client_id); } size = memblock[client_id].size; - if (memblock[client_id].client_id == 1) { - /* - * Check if the client id - * is of diag so that free - * the memory region of - * client's size + guard - * bytes of 4K. - */ + if (memblock[client_id].guard_band) { + /* + * Check if the client required guard band support so + * the memory region of client's size + guard + * bytes of 4K can be freed + */ size += MEMSHARE_GUARD_BYTES; } dma_free_attrs(memsh_drv->dev, size, @@ -655,7 +651,7 @@ static void handle_query_size_req(struct qmi_handle *handle, static void mem_share_svc_disconnect_cb(struct qmi_handle *qmi, unsigned int node, unsigned int port) { - pr_debug("memshare: Received QMI client disconnect event\n"); + return; } static struct qmi_ops server_ops = { @@ -787,6 +783,10 @@ static int memshare_child_probe(struct platform_device *pdev) pdev->dev.of_node, "qcom,allocate-on-request"); + memblock[num_clients].guard_band = of_property_read_bool( + pdev->dev.of_node, + "qcom,guard-band"); + rc = of_property_read_string(pdev->dev.of_node, "label", &name); if (rc) { @@ -809,7 +809,7 @@ static int memshare_child_probe(struct platform_device *pdev) * Memshare allocation for guaranteed clients */ if (memblock[num_clients].guarantee && size > 0) { - if 
(client_id == 1) + if (memblock[num_clients].guard_band) size += MEMSHARE_GUARD_BYTES; rc = memshare_alloc(memsh_child->dev, size, diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h index 908f091c86ebfee9202d22c435b6ab1e2bad4085..b94b138d285614846ff54bff566fba0bba67a7f6 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.h +++ b/drivers/soc/qcom/memshare/msm_memshare.h @@ -43,6 +43,8 @@ struct mem_blocks { uint32_t alloc_request; /* Allocation on request from a client*/ uint32_t client_request; + /* Guard band around the allotted region*/ + uint32_t guard_band; /* Size required for client */ uint32_t size; /* diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c index 45094f1fc300591af41539304f4e36b7880d7ab4..5b731fec3420303324413eec2949aa76c175e48c 100644 --- a/drivers/soc/qcom/msm_performance.c +++ b/drivers/soc/qcom/msm_performance.c @@ -25,7 +25,14 @@ #include #include #include +#include +/* + * Sched will provide the data for every 20ms window, + * will collect the data for 15 windows(300ms) and then update + * sysfs nodes with aggregated data + */ +#define POLL_INT 15 /* To handle cpufreq min/max request */ struct cpu_status { @@ -42,6 +49,9 @@ struct events { static struct events events_group; static struct task_struct *events_notify_thread; +static unsigned int aggr_big_nr; +static unsigned int aggr_top_load; + /*******************************sysfs start************************************/ static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp) { @@ -206,7 +216,38 @@ static struct attribute_group events_attr_group = { .attrs = events_attrs, }; +static ssize_t show_big_nr(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", aggr_big_nr); +} + +static struct kobj_attribute big_nr_attr = +__ATTR(aggr_big_nr, 0444, show_big_nr, NULL); + +static ssize_t show_top_load(struct kobject *kobj, + struct 
kobj_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", aggr_top_load); +} + +static struct kobj_attribute top_load_attr = +__ATTR(aggr_top_load, 0444, show_top_load, NULL); + + +static struct attribute *notify_attrs[] = { + &big_nr_attr.attr, + &top_load_attr.attr, + NULL, +}; + +static struct attribute_group notify_attr_group = { + .attrs = notify_attrs, +}; +static struct kobject *notify_kobj; + /*******************************sysfs ends************************************/ + static int perf_adjust_notify(struct notifier_block *nb, unsigned long val, void *data) { @@ -282,6 +323,32 @@ static int events_notify_userspace(void *data) return 0; } +static int init_notify_group(void) +{ + int ret; + struct kobject *module_kobj; + + module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); + if (!module_kobj) { + pr_err("msm_perf: Couldn't find module kobject\n"); + return -ENOENT; + } + + notify_kobj = kobject_create_and_add("notify", module_kobj); + if (!notify_kobj) { + pr_err("msm_perf: Failed to add notify_kobj\n"); + return -ENOMEM; + } + + ret = sysfs_create_group(notify_kobj, ¬ify_attr_group); + if (ret) { + kobject_put(notify_kobj); + pr_err("msm_perf: Failed to create sysfs\n"); + return ret; + } + return 0; +} + static int init_events_group(void) { int ret; @@ -316,6 +383,67 @@ static int init_events_group(void) return 0; } +static void nr_notify_userspace(struct work_struct *work) +{ + sysfs_notify(notify_kobj, NULL, "aggr_top_load"); + sysfs_notify(notify_kobj, NULL, "aggr_big_nr"); +} + +static int msm_perf_core_ctl_notify(struct notifier_block *nb, + unsigned long unused, + void *data) +{ + static unsigned int tld, nrb, i; + static DECLARE_WORK(sysfs_notify_work, nr_notify_userspace); + struct core_ctl_notif_data *d = data; + + + nrb += d->nr_big; + tld += d->coloc_load_pct; + i++; + if (i == POLL_INT) { + aggr_big_nr = ((nrb%POLL_INT) ? 
1 : 0) + nrb/POLL_INT; + aggr_top_load = tld/POLL_INT; + tld = 0; + nrb = 0; + i = 0; + schedule_work(&sysfs_notify_work); + } + return NOTIFY_OK; +} + +static struct notifier_block msm_perf_nb = { + .notifier_call = msm_perf_core_ctl_notify +}; + +static bool core_ctl_register; +static int set_core_ctl_register(const char *buf, const struct kernel_param *kp) +{ + int ret; + bool old_val = core_ctl_register; + + ret = param_set_bool(buf, kp); + if (ret < 0) + return ret; + + if (core_ctl_register == old_val) + return 0; + + if (core_ctl_register) + core_ctl_notifier_register(&msm_perf_nb); + else + core_ctl_notifier_unregister(&msm_perf_nb); + + return 0; +} + +static const struct kernel_param_ops param_ops_cc_register = { + .set = set_core_ctl_register, + .get = param_get_bool, +}; +module_param_cb(core_ctl_register, ¶m_ops_cc_register, + &core_ctl_register, 0644); + static int __init msm_performance_init(void) { unsigned int cpu; @@ -332,6 +460,7 @@ static int __init msm_performance_init(void) NULL); init_events_group(); + init_notify_group(); return 0; } diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index 672e4eeb560868a21f1125722725cf7c0a09c4cc..80700db833be14603aacb75260934fa202c0f4c5 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -896,7 +896,6 @@ static int pil_notify_aop(struct pil_desc *desc, char *status) static DECLARE_RWSEM(pil_pm_rwsem); struct pil_seg_data { - int seg_id; struct pil_desc *desc; struct pil_seg *seg; struct work_struct load_seg_work; @@ -916,20 +915,27 @@ static void pil_load_seg_work_fn(struct work_struct *work) static int pil_load_segs(struct pil_desc *desc) { + int ret = 0; int seg_id = 0; struct pil_priv *priv = desc->priv; struct pil_seg_data *pil_seg_data; struct pil_seg *seg; - DECLARE_BITMAP(err_map, priv->num_segs); + unsigned long *err_map; - pil_seg_data = kcalloc(priv->num_segs, sizeof(*pil_seg_data), + err_map = 
kcalloc(BITS_TO_LONGS(priv->num_segs), sizeof(unsigned long), GFP_KERNEL); - if (!pil_seg_data) + if (!err_map) return -ENOMEM; + pil_seg_data = kcalloc(priv->num_segs, sizeof(*pil_seg_data), + GFP_KERNEL); + if (!pil_seg_data) { + ret = -ENOMEM; + goto out; + } + /* Initialize and spawn a thread for each segment */ list_for_each_entry(seg, &desc->priv->segs, list) { - pil_seg_data[seg_id].seg_id = seg_id; pil_seg_data[seg_id].desc = desc; pil_seg_data[seg_id].seg = seg; @@ -964,9 +970,11 @@ static int pil_load_segs(struct pil_desc *desc) /* Each segment can fail due to different reason. Send a generic err */ if (!bitmap_empty(err_map, priv->num_segs)) - return -EFAULT; + ret = -EFAULT; - return 0; +out: + kfree(err_map); + return ret; } /** diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c index 8728f42d3aa8fcd7a0c3843e92f11c8f2d25b0a8..44c035fbe06982557021291ae46d9510e45c8f7b 100644 --- a/drivers/soc/qcom/qmi_interface.c +++ b/drivers/soc/qcom/qmi_interface.c @@ -324,7 +324,7 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, txn->dest = c_struct; mutex_lock(&qmi->txn_lock); - ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL); + ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL); if (ret < 0) pr_err("failed to allocate transaction id\n"); @@ -483,6 +483,9 @@ static void qmi_handle_message(struct qmi_handle *qmi, struct qmi_txn *txn = NULL; int ret; + if (!len) + return; + if (len < sizeof(*hdr)) { pr_err("ignoring short QMI packet\n"); return; diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c index c38d4c92de35f34e471e21811a04e12840856174..d1c4eb7992d50810b4ff662bce5399ed1a30dfcb 100644 --- a/drivers/soc/qcom/qmi_rmnet.c +++ b/drivers/soc/qcom/qmi_rmnet.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #define NLMSG_FLOW_ACTIVATE 1 #define NLMSG_FLOW_DEACTIVATE 2 @@ -35,6 +37,7 @@ unsigned int rmnet_wq_frequency __read_mostly = 4; 
module_param(rmnet_wq_frequency, uint, 0644); MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check"); +#define PS_WORK_ACTIVE_BIT 0 #define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ) #define NO_DELAY (0x0000 * HZ) @@ -100,38 +103,6 @@ qmi_rmnet_has_client(struct qmi_info *qmi) } #ifdef CONFIG_QCOM_QMI_DFC -static void -qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev, - struct rmnet_flow_map *itm, int add_flow) -{ - int i; - - if (add_flow) { - if (qmi->flow_cnt == MAX_FLOW_NUM - 1) { - pr_err("%s() No more space for new flow\n", __func__); - return; - } - - qmi->flow[qmi->flow_cnt].dev = dev; - qmi->flow[qmi->flow_cnt].itm = itm; - qmi->flow_cnt++; - } else { - for (i = 0; i < qmi->flow_cnt; i++) { - if ((qmi->flow[i].dev == dev) && - (qmi->flow[i].itm == itm)) { - qmi->flow[i].dev = - qmi->flow[qmi->flow_cnt-1].dev; - qmi->flow[i].itm = - qmi->flow[qmi->flow_cnt-1].itm; - qmi->flow[qmi->flow_cnt-1].dev = NULL; - qmi->flow[qmi->flow_cnt-1].itm = NULL; - qmi->flow_cnt--; - break; - } - } - } -} - static void qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev, struct qos_info *qos) @@ -142,7 +113,6 @@ qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev, ASSERT_RTNL(); list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) { - qmi_rmnet_update_flow_link(qmi, dev, itm, 0); list_del(&itm->list); kfree(itm); } @@ -232,7 +202,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, new_map.flow_id = tcm->tcm_parent; new_map.ip_type = tcm->tcm_ifindex; new_map.tcm_handle = tcm->tcm_handle; - trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id, + trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id, new_map.ip_type, new_map.tcm_handle, 1); spin_lock_bh(&qos_info->qos_lock); @@ -248,7 +218,6 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, return -ENOMEM; } - qmi_rmnet_update_flow_link(qmi, dev, itm, 1); 
qmi_rmnet_update_flow_map(itm, &new_map); list_add(&itm->list, &qos_info->flow_head); @@ -273,6 +242,9 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, qmi_rmnet_flow_control(dev, itm->tcm_handle, bearer->grant_size > 0 ? 1 : 0); + + trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id, + bearer->grant_size, 0, itm->tcm_handle, 1); } spin_unlock_bh(&qos_info->qos_lock); @@ -287,7 +259,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); struct rmnet_flow_map new_map, *itm; struct rmnet_bearer_map *bearer; - int bearer_removed = 0; if (!qos_info) return -EINVAL; @@ -307,65 +278,33 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, new_map.ip_type); if (itm) { - trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id, - new_map.ip_type, itm->tcm_handle, 0); - qmi_rmnet_update_flow_link(qmi, dev, itm, 0); + trace_dfc_flow_info(dev->name, new_map.bearer_id, + new_map.flow_id, new_map.ip_type, + itm->tcm_handle, 0); list_del(&itm->list); + /* Enable flow to allow new call setup */ + qmi_rmnet_flow_control(dev, itm->tcm_handle, 1); + trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id, + 0, 0, itm->tcm_handle, 1); + /*clear bearer map*/ bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id); if (bearer && --bearer->flow_ref == 0) { list_del(&bearer->list); - bearer_removed = 1; + kfree(bearer); } kfree(itm); - if (bearer_removed) - kfree(bearer); } - spin_unlock_bh(&qos_info->qos_lock); - - return 0; -} - -static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi) -{ - int i; - struct qos_info *qos; - struct rmnet_flow_map *m; - struct rmnet_bearer_map *bearer; - int qlen; - - if (!qmi_rmnet_has_dfc_client(qmi) || (qmi->flow_cnt == 0)) - return 0; - - ASSERT_RTNL(); - - for (i = 0; i < qmi->flow_cnt; i++) { - qos = (struct qos_info *)rmnet_get_qos_pt(qmi->flow[i].dev); - m = 
qmi->flow[i].itm; - - spin_lock_bh(&qos->qos_lock); - - bearer = qmi_rmnet_get_bearer_map(qos, m->bearer_id); - if (bearer) { - bearer->grant_size = DEFAULT_GRANT; - bearer->grant_thresh = - qmi_rmnet_grant_per(DEFAULT_GRANT); - bearer->seq = 0; - bearer->ack_req = 0; - } - - qlen = qmi_rmnet_flow_control(qmi->flow[i].dev, - m->tcm_handle, 1); - trace_dfc_qmi_tc(m->bearer_id, m->flow_id, - DEFAULT_GRANT, qlen, - m->tcm_handle, 1); - - spin_unlock_bh(&qos->qos_lock); + if (list_empty(&qos_info->flow_head)) { + netif_tx_wake_all_queues(dev); + trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1); } + spin_unlock_bh(&qos_info->qos_lock); + return 0; } @@ -419,11 +358,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, { return -EINVAL; } - -static inline int qmi_rmnet_enable_all_flows(struct qmi_info *qmi) -{ - return 0; -} #endif static int @@ -568,13 +502,13 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) ASSERT_RTNL(); + qmi_rmnet_work_exit(port); + if (qmi->wda_client) { wda_qmi_client_exit(qmi->wda_client); qmi->wda_client = NULL; } - qmi_rmnet_work_exit(port); - for (i = 0; i < MAX_CLIENT_NUM; i++) { if (!__qmi_rmnet_delete_client(port, qmi, i)) return; @@ -582,6 +516,38 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) } EXPORT_SYMBOL(qmi_rmnet_qmi_exit); +void qmi_rmnet_enable_all_flows(struct net_device *dev) +{ + struct qos_info *qos; + struct rmnet_bearer_map *bearer; + int do_wake = 0; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + return; + + spin_lock_bh(&qos->qos_lock); + + list_for_each_entry(bearer, &qos->bearer_head, list) { + bearer->grant_before_ps = bearer->grant_size; + bearer->seq_before_ps = bearer->seq; + bearer->grant_size = DEFAULT_GRANT; + bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT); + bearer->seq = 0; + bearer->ack_req = 0; + bearer->ancillary = 0; + do_wake = 1; + } + + if (do_wake) { + netif_tx_wake_all_queues(dev); + trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 
1); + } + + spin_unlock_bh(&qos->qos_lock); +} +EXPORT_SYMBOL(qmi_rmnet_enable_all_flows); + #ifdef CONFIG_QCOM_QMI_DFC void qmi_rmnet_burst_fc_check(struct net_device *dev, int ip_type, u32 mark, unsigned int len) @@ -595,6 +561,59 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev, } EXPORT_SYMBOL(qmi_rmnet_burst_fc_check); +int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct qos_info *qos = rmnet_get_qos_pt(dev); + int txq = 0, ip_type = AF_INET; + unsigned int len = skb->len; + struct rmnet_flow_map *itm; + u32 mark = skb->mark; + + if (!qos) + return 0; + + switch (skb->protocol) { + /* TCPv4 ACKs */ + case htons(ETH_P_IP): + ip_type = AF_INET; + if ((!mark) && + (ip_hdr(skb)->protocol == IPPROTO_TCP) && + (len == 40 || len == 52) && + (ip_hdr(skb)->ihl == 5) && + ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK)) + return 1; + break; + + /* TCPv6 ACKs */ + case htons(ETH_P_IPV6): + ip_type = AF_INET6; + if ((!mark) && + (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && + (len == 60 || len == 72) && + ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK)) + return 1; + /* Fall through */ + } + + /* Default flows */ + if (!mark) + return 0; + + /* Dedicated flows */ + spin_lock_bh(&qos->qos_lock); + + itm = qmi_rmnet_get_flow_map(qos, mark, ip_type); + if (unlikely(!itm)) + goto done; + + txq = itm->tcm_handle; + +done: + spin_unlock_bh(&qos->qos_lock); + return txq; +} +EXPORT_SYMBOL(qmi_rmnet_get_queue); + inline unsigned int qmi_rmnet_grant_per(unsigned int grant) { return grant / qmi_rmnet_scale_factor; @@ -639,6 +658,7 @@ EXPORT_SYMBOL(qmi_rmnet_qos_exit); #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE static struct workqueue_struct *rmnet_ps_wq; static struct rmnet_powersave_work *rmnet_work; +static struct list_head ps_list; struct rmnet_powersave_work { struct delayed_work work; @@ -647,6 +667,58 @@ struct rmnet_powersave_work { u64 old_tx_pkts; }; +void qmi_rmnet_ps_on_notify(void *port) +{ + struct qmi_rmnet_ps_ind *tmp; + 
+ list_for_each_entry(tmp, &ps_list, list) + tmp->ps_on_handler(port); +} +EXPORT_SYMBOL(qmi_rmnet_ps_on_notify); + +void qmi_rmnet_ps_off_notify(void *port) +{ + struct qmi_rmnet_ps_ind *tmp; + + list_for_each_entry(tmp, &ps_list, list) + tmp->ps_off_handler(port); +} +EXPORT_SYMBOL(qmi_rmnet_ps_off_notify); + +int qmi_rmnet_ps_ind_register(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + + if (!port || !ps_ind || !ps_ind->ps_on_handler || + !ps_ind->ps_off_handler) + return -EINVAL; + + list_add_rcu(&ps_ind->list, &ps_list); + + return 0; +} +EXPORT_SYMBOL(qmi_rmnet_ps_ind_register); + +int qmi_rmnet_ps_ind_deregister(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + struct qmi_rmnet_ps_ind *tmp; + + if (!port || !ps_ind) + return -EINVAL; + + list_for_each_entry(tmp, &ps_list, list) { + if (tmp == ps_ind) { + list_del_rcu(&ps_ind->list); + goto done; + } + } + +done: + return 0; +} +EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister); + int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable) { int rc = -EINVAL; @@ -661,8 +733,9 @@ int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable) __func__, enable, rc); return rc; } + if (enable) - qmi_rmnet_enable_all_flows(qmi); + dfc_qmi_wq_flush(qmi); return 0; } @@ -679,9 +752,9 @@ EXPORT_SYMBOL(qmi_rmnet_work_restart); static void qmi_rmnet_check_stats(struct work_struct *work) { struct rmnet_powersave_work *real_work; + struct qmi_info *qmi; u64 rxd, txd; u64 rx, tx; - unsigned long lock_delay; real_work = container_of(to_delayed_work(work), struct rmnet_powersave_work, work); @@ -689,28 +762,27 @@ static void qmi_rmnet_check_stats(struct work_struct *work) if (unlikely(!real_work || !real_work->port)) return; - /* Min Delay for retry errors */ - lock_delay = qmi_rmnet_work_get_active(real_work->port) ? 
- PS_INTERVAL : (HZ / 50); - - if (!rtnl_trylock()) { - queue_delayed_work(rmnet_ps_wq, &real_work->work, lock_delay); + qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port); + if (unlikely(!qmi)) return; - } - if (!qmi_rmnet_work_get_active(real_work->port)) { - qmi_rmnet_work_set_active(real_work->port, 1); + + if (qmi->ps_enabled) { /* Retry after small delay if qmi error * This resumes UL grants by disabling * powersave mode if successful. */ if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) { - qmi_rmnet_work_set_active(real_work->port, 0); queue_delayed_work(rmnet_ps_wq, - &real_work->work, lock_delay); - rtnl_unlock(); + &real_work->work, HZ / 50); return; } + qmi->ps_enabled = 0; + + if (rmnet_get_powersave_notif(real_work->port)) + qmi_rmnet_ps_off_notify(real_work->port); + + goto end; } @@ -721,26 +793,42 @@ static void qmi_rmnet_check_stats(struct work_struct *work) real_work->old_tx_pkts = tx; if (!rxd && !txd) { - qmi_rmnet_work_set_active(real_work->port, 0); - /* Retry after lock delay if enabling powersave fails. - * This will cause UL grants to continue being sent - * suboptimally. Keeps wq active until successful. - */ if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) { - qmi_rmnet_work_set_active(real_work->port, 1); queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL); - + return; } + qmi->ps_enabled = 1; + clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); + + /* Enable flow after clear the bit so a new + * work can be triggered. 
+ */ + rmnet_enable_all_flows(real_work->port); + + if (rmnet_get_powersave_notif(real_work->port)) + qmi_rmnet_ps_on_notify(real_work->port); - rtnl_unlock(); return; } end: - rtnl_unlock(); queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL); } +static void qmi_rmnet_work_set_active(void *port, int status) +{ + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) + return; + + if (status) + set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); + else + clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); +} + void qmi_rmnet_work_init(void *port) { if (rmnet_ps_wq) @@ -755,7 +843,7 @@ void qmi_rmnet_work_init(void *port) rmnet_ps_wq = NULL; return; } - + INIT_LIST_HEAD(&ps_list); INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats); rmnet_work->port = port; rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts, @@ -766,29 +854,26 @@ void qmi_rmnet_work_init(void *port) } EXPORT_SYMBOL(qmi_rmnet_work_init); -void qmi_rmnet_work_set_active(void *port, int status) +void qmi_rmnet_work_maybe_restart(void *port) { - if (!port) + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) return; - ((struct qmi_info *)rmnet_get_qmi_pt(port))->active = status; -} -EXPORT_SYMBOL(qmi_rmnet_work_set_active); -int qmi_rmnet_work_get_active(void *port) -{ - if (!port) - return 0; - return ((struct qmi_info *)rmnet_get_qmi_pt(port))->active; + if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) + qmi_rmnet_work_restart(port); } -EXPORT_SYMBOL(qmi_rmnet_work_get_active); +EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart); void qmi_rmnet_work_exit(void *port) { - qmi_rmnet_work_set_active(port, 0); if (!rmnet_ps_wq || !rmnet_work) return; cancel_delayed_work_sync(&rmnet_work->work); destroy_workqueue(rmnet_ps_wq); + qmi_rmnet_work_set_active(port, 0); rmnet_ps_wq = NULL; kfree(rmnet_work); rmnet_work = NULL; diff --git a/drivers/soc/qcom/qmi_rmnet_i.h 
b/drivers/soc/qcom/qmi_rmnet_i.h index ab52ee208a8cbd7d0967a1335b31d14042e97c89..2f7c262d26e6580331505c64bf2124ac605a3285 100644 --- a/drivers/soc/qcom/qmi_rmnet_i.h +++ b/drivers/soc/qcom/qmi_rmnet_i.h @@ -40,6 +40,9 @@ struct rmnet_bearer_map { u32 grant_thresh; u16 seq; u8 ack_req; + u32 grant_before_ps; + u16 seq_before_ps; + u32 ancillary; }; struct svc_info { @@ -70,11 +73,10 @@ struct flow_info { struct qmi_info { int flag; - int flow_cnt; - struct flow_info flow[MAX_FLOW_NUM]; void *wda_client; struct fc_info fc_info[MAX_CLIENT_NUM]; - int active; + unsigned long ps_work_active; + int ps_enabled; }; enum data_ep_type_enum_v01 { @@ -116,6 +118,8 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable); +void dfc_qmi_wq_flush(struct qmi_info *qmi); + #else static inline struct rmnet_flow_map * qmi_rmnet_get_flow_map(struct qos_info *qos_info, @@ -145,6 +149,11 @@ dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, int ip_type, u32 mark, unsigned int len) { } + +static inline void +dfc_qmi_wq_flush(struct qmi_info *qmi) +{ +} #endif #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE diff --git a/drivers/soc/qcom/qpnp-pbs.c b/drivers/soc/qcom/qpnp-pbs.c index 287c8a25b3912d14655207712e8e7173adb3d206..31f351aab2c5e97753974099963d79dae05181f6 100644 --- a/drivers/soc/qcom/qpnp-pbs.c +++ b/drivers/soc/qcom/qpnp-pbs.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -121,9 +121,10 @@ static int qpnp_pbs_wait_for_ack(struct qpnp_pbs *pbs, u8 bit_pos) } if (val == 0xFF) { + val = 0; /* PBS error - clear SCRATCH2 register */ rc = qpnp_pbs_write(pbs, pbs->base + - PBS_CLIENT_SCRATCH2, 0, 1); + PBS_CLIENT_SCRATCH2, &val, 1); if (rc < 0) { pr_err("Failed to clear register %x rc=%d\n", PBS_CLIENT_SCRATCH2, rc); @@ -198,8 +199,10 @@ int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap) } if (val == 0xFF) { + val = 0; /* PBS error - clear SCRATCH2 register */ - rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2, 0, 1); + rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2, &val, + 1); if (rc < 0) { pr_err("Failed to clear register %x rc=%d\n", PBS_CLIENT_SCRATCH2, rc); diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 0a9d18e196336c7ead0ea692924e2debbb8cf241..1ce422322f1ae36a6aea28502d7f4801fff6e54e 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -248,10 +248,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table, #define BATCH_MAX_SIZE SZ_2M #define BATCH_MAX_SECTIONS 32 -int hyp_assign_table(struct sg_table *table, +/* + * When -EAGAIN is returned it is safe for the caller to try to call + * __hyp_assign_table again. + * + * When -EADDRNOTAVAIL is returned the memory may no longer be in + * a usable state and should no longer be accessed by the HLOS. 
+ */ +static int __hyp_assign_table(struct sg_table *table, u32 *source_vm_list, int source_nelems, int *dest_vmids, int *dest_perms, - int dest_nelems) + int dest_nelems, bool try_lock) { int ret = 0; struct scm_desc desc = {0}; @@ -281,10 +288,17 @@ int hyp_assign_table(struct sg_table *table, &dest_vm_copy_size); if (!dest_vm_copy) { ret = -ENOMEM; - goto out_free; + goto out_free_src; } - mutex_lock(&secure_buffer_mutex); + if (try_lock) { + if (!mutex_trylock(&secure_buffer_mutex)) { + ret = -EAGAIN; + goto out_free_dest; + } + } else { + mutex_lock(&secure_buffer_mutex); + } sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size); if (!sg_table_copy) { @@ -340,6 +354,12 @@ int hyp_assign_table(struct sg_table *table, if (ret) { pr_info("%s: Failed to assign memory protection, ret = %d\n", __func__, ret); + + /* + * Make it clear to clients that the memory may no + * longer be in a usable state. + */ + ret = -EADDRNOTAVAIL; break; } batch_start = batch_end; @@ -347,12 +367,31 @@ int hyp_assign_table(struct sg_table *table, out_unlock: mutex_unlock(&secure_buffer_mutex); +out_free_dest: kfree(dest_vm_copy); -out_free: +out_free_src: kfree(source_vm_copy); return ret; } +int hyp_assign_table(struct sg_table *table, + u32 *source_vm_list, int source_nelems, + int *dest_vmids, int *dest_perms, + int dest_nelems) +{ + return __hyp_assign_table(table, source_vm_list, source_nelems, + dest_vmids, dest_perms, dest_nelems, false); +} + +int try_hyp_assign_table(struct sg_table *table, + u32 *source_vm_list, int source_nelems, + int *dest_vmids, int *dest_perms, + int dest_nelems) +{ + return __hyp_assign_table(table, source_vm_list, source_nelems, + dest_vmids, dest_perms, dest_nelems, true); +} + int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list, int source_nelems, int *dest_vmids, int *dest_perms, int dest_nelems) diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c index 
065aa47b579a8384e470cee7cc4a313a56959a45..c15c2420d2275e7845e15eb01167335a70220f63 100644 --- a/drivers/soc/qcom/smcinvoke.c +++ b/drivers/soc/qcom/smcinvoke.c @@ -790,7 +790,7 @@ static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len) ob->p_addr = mem_obj->p_addr; ob->len = mem_obj->p_addr_len; ob->perms = SMCINVOKE_MEM_PERM_RW; - *oo = mem_obj->mem_map_obj_id; + *oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id); out: if (ret != OBJECT_OK) kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 93949de7b0ddcd4a24f48de38744599e7779fecb..fba9c25e043b70f0cce5f65e2181ea0cec162092 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -623,13 +623,13 @@ static int qcom_smp2p_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq, NULL, qcom_smp2p_intr, - IRQF_ONESHOT, + IRQF_NO_SUSPEND | IRQF_ONESHOT, "smp2p", (void *)smp2p); if (ret) { dev_err(&pdev->dev, "failed to request interrupt\n"); goto unwind_interfaces; } - + enable_irq_wake(smp2p->irq); return 0; diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c index bd81703339f4b6a14b34ed0b038f2b516fb94dbc..443f4c23b4fa9ad1246409eeb94160083fb10962 100644 --- a/drivers/soc/qcom/smp2p_sleepstate.c +++ b/drivers/soc/qcom/smp2p_sleepstate.c @@ -74,26 +74,32 @@ static int smp2p_sleepstate_probe(struct platform_device *pdev) ret = register_pm_notifier(&sleepstate_pm_nb); if (ret) - pr_err("%s: power state notif error %d\n", __func__, ret); + dev_err(&pdev->dev, "%s: power state notif error %d\n", + __func__, ret); wakeup_source_init(¬ify_ws, "smp2p-sleepstate"); irq = of_irq_get_byname(node, "smp2p-sleepstate-in"); if (irq <= 0) { - pr_err("failed for irq getbyname for smp2p_sleep_state\n"); - wakeup_source_trash(¬ify_ws); - return -EPROBE_DEFER; + dev_err(&pdev->dev, + "failed for irq getbyname for smp2p_sleep_state\n"); + ret = 
-EPROBE_DEFER; + goto err; } - pr_info("got smp2p-sleepstate-in irq %d\n", irq); + dev_info(&pdev->dev, "got smp2p-sleepstate-in irq %d\n", irq); ret = devm_request_threaded_irq(dev, irq, NULL, (irq_handler_t)smp2p_sleepstate_handler, IRQF_TRIGGER_RISING, "smp2p_sleepstate", dev); if (ret) { - pr_err("fail to register smp2p threaded_irq=%d\n", irq); - wakeup_source_trash(¬ify_ws); - return ret; + dev_err(&pdev->dev, "fail to register smp2p threaded_irq=%d\n", + irq); + goto err; } return 0; +err: + wakeup_source_trash(¬ify_ws); + unregister_pm_notifier(&sleepstate_pm_nb); + return ret; } static const struct of_device_id smp2p_slst_match_table[] = { diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index e752ad57a83a9887681379fb8e2359cee128d598..7da1a1d976b5d640f575af4c073fb7999db9b04a 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -350,6 +350,9 @@ static struct msm_soc_info cpu_of_id[] = { /* sdmmagpie ID */ [365] = {MSM_CPU_SDMMAGPIE, "SDMMAGPIE"}, + /* trinket ID */ + [394] = {MSM_CPU_TRINKET, "TRINKET"}, + /* Uninitialized IDs are not known to run Linux. * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are * considered as unknown CPU. 
@@ -1249,6 +1252,10 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 365; strlcpy(dummy_socinfo.build_id, "sdmmagpie - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_trinket()) { + dummy_socinfo.id = 394; + strlcpy(dummy_socinfo.build_id, "trinket - ", + sizeof(dummy_socinfo.build_id)); } else strlcat(dummy_socinfo.build_id, "Dummy socinfo", sizeof(dummy_socinfo.build_id)); diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index 891f94113d1405dc016cef1409939e7e575eab9b..a19248f07c03dba9fd3db77474e907295831c463 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -1706,6 +1706,7 @@ static int spcom_create_channel_chardev(const char *name) pr_err("can't unregister rpmsg drv\n", ret); exit_destroy_channel: // empty channel leaves free slot for next time + mutex_lock(&ch->lock); memset(ch->name, 0, SPCOM_CHANNEL_NAME_SIZE); mutex_unlock(&ch->lock); return -EFAULT; diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c index cb603b4d053c1fbf5b6698a404690fc137ce65f6..985a2c0951d0d4275a8792acee98a5d5523a989d 100644 --- a/drivers/soc/qcom/sysmon-qmi.c +++ b/drivers/soc/qcom/sysmon-qmi.c @@ -79,6 +79,7 @@ static LIST_HEAD(sysmon_list); static DEFINE_MUTEX(sysmon_list_lock); static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = { + [0 ... 
SUBSYS_NOTIF_TYPE_COUNT - 1] = SSCTL_SSR_EVENT_INVALID, [SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP, [SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP, [SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN, @@ -118,6 +119,11 @@ static struct qmi_msg_handler qmi_indication_handler[] = { {} }; +static bool is_ssctl_event(enum subsys_notif_type notif) +{ + return notif_map[notif] != SSCTL_SSR_EVENT_INVALID; +} + static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc) { struct sysmon_qmi_data *data = container_of(qmi, @@ -258,8 +264,8 @@ int sysmon_send_event(struct subsys_desc *dest_desc, int ret; struct qmi_txn txn; - if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL - || dest_ss == NULL) + if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || + !is_ssctl_event(notif) || event_ss == NULL || dest_ss == NULL) return -EINVAL; mutex_lock(&sysmon_list_lock); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index a03599b91bf3dc21401b602aa1f21aa407bd8d2b..09bc010c258df31a8fba42db20d0b93f12ddbc62 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -357,9 +357,6 @@ static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer, if (mode & SPI_CPHA) flags |= GSI_CPHA; - if (xfer->cs_change) - flags |= GSI_CS_TOGGLE; - word_len = xfer->bits_per_word - MIN_WORD_LEN; pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN); ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div); @@ -591,8 +588,11 @@ static int setup_gsi_xfer(struct spi_transfer *xfer, } cs |= spi_slv->chip_select; - if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers)) - go_flags |= FRAGMENTATION; + if (!xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, + &spi->cur_msg->transfers)) + go_flags |= FRAGMENTATION; + } go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas); sg_init_table(xfer_tx_sg, tx_nent); @@ -955,8 +955,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, m_cmd = 
SPI_RX_ONLY; spi_tx_cfg &= ~CS_TOGGLE; - if (xfer->cs_change) - spi_tx_cfg |= CS_TOGGLE; if (!(mas->cur_word_len % MIN_WORD_LEN)) { trans_len = ((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK; @@ -965,8 +963,12 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK; } - if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers)) - m_param |= FRAGMENTATION; + + if (!xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, + &spi->cur_msg->transfers)) + m_param |= FRAGMENTATION; + } mas->cur_xfer = xfer; if (m_cmd & SPI_TX_ONLY) { @@ -1022,7 +1024,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, } } - /* Ensure all writes are done before the WM interrupt */ mb(); } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index cda10719d1d1b21b32866d2b79363faa461ab8e1..2aa90648c688a19c9b53ce1bac9c75781b3cf841 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -669,6 +669,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "lineartechnology,ltc2488" }, { .compatible = "ge,achc" }, { .compatible = "semtech,sx1301" }, + { .compatible = "qcom,spi-msm-codec-slave" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 6985e6cc434ef07c682b589b48f3b617ab5ede57..5257b814417a5d7f852eb11f2633500a1f2d4943 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -645,8 +645,12 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) type.type |= BIT(irq); if (flow_type & IRQF_TRIGGER_RISING) type.polarity_high |= BIT(irq); + else + type.polarity_high &= ~BIT(irq); if (flow_type & IRQF_TRIGGER_FALLING) type.polarity_low |= BIT(irq); + else + type.polarity_low &= ~BIT(irq); flow_handler = handle_edge_irq; } else { @@ -655,10 +659,13 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) return -EINVAL; type.type &= 
~BIT(irq); /* level trig */ - if (flow_type & IRQF_TRIGGER_HIGH) + if (flow_type & IRQF_TRIGGER_HIGH) { type.polarity_high |= BIT(irq); - else + type.polarity_low &= ~BIT(irq); + } else { type.polarity_low |= BIT(irq); + type.polarity_high &= ~BIT(irq); + } flow_handler = handle_level_irq; } diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index 492df79784860874b9a3358132ddb264cf842c8c..563e4a82a73a1be03595a819be4a587759f094d7 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -55,3 +55,11 @@ config ION_FORCE_DMA_SYNC We generally don't want to enable this config as it breaks the cache maintenance model. If you're not sure say N here. + +config ION_DEFER_FREE_NO_SCHED_IDLE + bool "Increases the priority of ION defer free thead" + depends on ION + help + Choose this option to remove the SCHED_IDLE flag in case of defer free + thereby increasing the priority of defer free thread. + if you're not sure say Y here. 
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index fb0d1f61fe86e8949d86ebcfb4c0cadcf4a55f7e..2240c4ae61ce28f97177dfd2ddb4796853288a69 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -126,6 +126,9 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) goto err2; + if (ret == -EINTR) + goto err2; + ion_heap_freelist_drain(heap, 0); ret = heap->ops->allocate(heap, buffer, len, flags); if (ret) @@ -559,8 +562,10 @@ static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl, sg_dma_addr = sg_dma_address(sg); len += sg->length; - if (len <= offset) + if (len <= offset) { + sg_dma_addr += sg->length; continue; + } sg_left = len - offset; sg_offset = sg->length - sg_left; @@ -1062,7 +1067,7 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask, if (!((1 << heap->id) & heap_id_mask)) continue; buffer = ion_buffer_create(heap, dev, len, flags); - if (!IS_ERR(buffer)) + if (!IS_ERR(buffer) || PTR_ERR(buffer) == -EINTR) break; } up_read(&dev->lock); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index a3ddd237ce4b1614067677f148f362f37a9c9041..b187ff340094361466ccf452b075e3cd55079412 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -250,8 +250,9 @@ static int ion_heap_deferred_free(void *data) int ion_heap_init_deferred_free(struct ion_heap *heap) { +#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE struct sched_param param = { .sched_priority = 0 }; - +#endif INIT_LIST_HEAD(&heap->free_list); init_waitqueue_head(&heap->waitqueue); heap->task = kthread_run(ion_heap_deferred_free, heap, @@ -261,7 +262,9 @@ int ion_heap_init_deferred_free(struct ion_heap *heap) __func__); return PTR_ERR_OR_ZERO(heap->task); } +#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE sched_setscheduler(heap->task, SCHED_IDLE, ¶m); +#endif return 
0; } diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index b1efb3958a871e16c11bfbb2b9aaa9519abceaf9..2568a5184865f62d2636efe779019cca275cdeed 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "ion.h" @@ -81,6 +82,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool) BUG_ON(!pool); + if (fatal_signal_pending(current)) + return ERR_PTR(-EINTR); + if (*from_pool && mutex_trylock(&pool->mutex)) { if (pool->high_count) page = ion_page_pool_remove(pool, true); @@ -92,6 +96,9 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool) page = ion_page_pool_alloc_pages(pool); *from_pool = false; } + + if (!page) + return ERR_PTR(-ENOMEM); return page; } @@ -103,7 +110,7 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool) struct page *page = NULL; if (!pool) - return NULL; + return ERR_PTR(-EINVAL); if (mutex_trylock(&pool->mutex)) { if (pool->high_count) @@ -113,6 +120,8 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool) mutex_unlock(&pool->mutex); } + if (!page) + return ERR_PTR(-ENOMEM); return page; } diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c index 93fafcac33acbb3679c41e60c74d2a41256170cb..cb8f2cc469886b09f684a406b7b93dff2c6b1f8d 100644 --- a/drivers/staging/android/ion/ion_secure_util.c +++ b/drivers/staging/android/ion/ion_secure_util.c @@ -99,7 +99,8 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list, } int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list, - int source_nelems, bool clear_page_private) + int source_nelems, bool clear_page_private, + bool try_lock) { u32 dest_vmid = VMID_HLOS; u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC; @@ -113,11 +114,16 @@ int ion_hyp_unassign_sg(struct 
sg_table *sgt, int *source_vm_list, goto out; } - ret = hyp_assign_table(sgt, source_vm_list, source_nelems, - &dest_vmid, &dest_perms, 1); + if (try_lock) + ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems, + &dest_vmid, &dest_perms, 1); + else + ret = hyp_assign_table(sgt, source_vm_list, source_nelems, + &dest_vmid, &dest_perms, 1); if (ret) { - pr_err("%s: Unassign call failed.\n", - __func__); + if (!try_lock) + pr_err("%s: Unassign call failed.\n", + __func__); goto out; } if (clear_page_private) @@ -193,7 +199,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags, } ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems, - set_page_private); + set_page_private, false); out_free_source: kfree(source_vm_list); diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h index 7947452daa48f0a102b53a56de6017a148e39de4..f6d00d9d9b7044d9209b6849d741ebcdba1c9c03 100644 --- a/drivers/staging/android/ion/ion_secure_util.h +++ b/drivers/staging/android/ion/ion_secure_util.h @@ -20,7 +20,8 @@ bool is_secure_vmid_valid(int vmid); int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list, int dest_nelems, bool set_page_private); int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list, - int source_nelems, bool clear_page_private); + int source_nelems, bool clear_page_private, + bool try_lock); int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags, bool set_page_private); int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags, diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 93b30e68b969964cce2a7aee5d39be65dc771c4e..960fc18ba9ab4a17dbc101f62969aa3b8aff6ad7 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -81,8 +81,8 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap, page = 
ion_page_pool_alloc(pool, from_pool); - if (!page) - return 0; + if (IS_ERR(page)) + return page; if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool)) ion_pages_sync_for_device(dev, page, PAGE_SIZE << order, @@ -133,7 +133,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) - return NULL; + return ERR_PTR(-ENOMEM); for (i = 0; i < NUM_ORDERS; i++) { if (size < order_to_size(orders[i])) @@ -142,7 +142,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, continue; from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC); page = alloc_buffer_page(heap, buffer, orders[i], &from_pool); - if (!page) + if (IS_ERR(page)) continue; info->page = page; @@ -153,7 +153,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, } kfree(info); - return NULL; + return ERR_PTR(-ENOMEM); } static struct page_info *alloc_from_pool_preferred( @@ -166,7 +166,7 @@ static struct page_info *alloc_from_pool_preferred( info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) - return NULL; + return ERR_PTR(-ENOMEM); for (i = 0; i < NUM_ORDERS; i++) { if (size < order_to_size(orders[i])) @@ -175,7 +175,7 @@ static struct page_info *alloc_from_pool_preferred( continue; page = alloc_from_secure_pool_order(heap, buffer, orders[i]); - if (!page) + if (IS_ERR(page)) continue; info->page = page; @@ -186,7 +186,7 @@ static struct page_info *alloc_from_pool_preferred( } page = split_page_from_secure_pool(heap, buffer); - if (page) { + if (!IS_ERR(page)) { info->page = page; info->order = 0; info->from_pool = true; @@ -270,7 +270,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, struct sg_table table_sync = {0}; struct scatterlist *sg; struct scatterlist *sg_sync; - int ret; + int ret = -ENOMEM; struct list_head pages; struct list_head pages_from_pool; struct page_info *info, *tmp_info; @@ -306,8 +306,10 @@ static int 
ion_system_heap_allocate(struct ion_heap *heap, sys_heap, buffer, size_remaining, max_order); - if (!info) + if (IS_ERR(info)) { + ret = PTR_ERR(info); goto err; + } sz = (1 << info->order) * PAGE_SIZE; @@ -389,10 +391,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap, err_free_sg2: /* We failed to zero buffers. Bypass pool */ - buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE; + buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; if (vmid > 0) - ion_hyp_unassign_sg(table, &vmid, 1, true); + ion_hyp_unassign_sg(table, &vmid, 1, true, false); for_each_sg(table->sgl, sg, table->nents, i) free_buffer_page(sys_heap, buffer, sg_page(sg), @@ -414,7 +416,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, free_buffer_page(sys_heap, buffer, info->page, info->order); kfree(info); } - return -ENOMEM; + return ret; } void ion_system_heap_free(struct ion_buffer *buffer) @@ -433,7 +435,7 @@ void ion_system_heap_free(struct ion_buffer *buffer) if (vmid < 0) ion_heap_buffer_zero(buffer); } else if (vmid > 0) { - if (ion_hyp_unassign_sg(table, &vmid, 1, true)) + if (ion_hyp_unassign_sg(table, &vmid, 1, true, false)) return; } diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c index 12bb465d2fe691edbae0dd252f1b10259cfabff2..64cf97a3ae238801df6bf07ad620209a9b5d7815 100644 --- a/drivers/staging/android/ion/ion_system_secure_heap.c +++ b/drivers/staging/android/ion/ion_system_secure_heap.c @@ -160,7 +160,8 @@ size_t ion_system_secure_heap_page_pool_total(struct ion_heap *heap, return total << PAGE_SHIFT; } -static void process_one_shrink(struct ion_heap *sys_heap, +static void process_one_shrink(struct ion_system_secure_heap *secure_heap, + struct ion_heap *sys_heap, struct prefetch_info *info) { struct ion_buffer buffer; @@ -168,7 +169,7 @@ static void process_one_shrink(struct ion_heap *sys_heap, int ret; memset(&buffer, 0, sizeof(struct ion_buffer)); - buffer.heap = sys_heap; + buffer.heap 
= &secure_heap->heap; buffer.flags = info->vmid; pool_size = ion_system_secure_heap_page_pool_total(sys_heap, @@ -182,6 +183,7 @@ static void process_one_shrink(struct ion_heap *sys_heap, } buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE; + buffer.heap = sys_heap; sys_heap->ops->free(&buffer); } @@ -201,7 +203,7 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work) spin_unlock_irqrestore(&secure_heap->work_lock, flags); if (info->shrink) - process_one_shrink(sys_heap, info); + process_one_shrink(secure_heap, sys_heap, info); else process_one_prefetch(sys_heap, info); @@ -377,7 +379,7 @@ struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap, struct ion_page_pool *pool; if (!is_secure_vmid_valid(vmid)) - return NULL; + return ERR_PTR(-EINVAL); pool = heap->secure_pools[vmid][order_to_index(order)]; return ion_page_pool_alloc_pool_only(pool); @@ -399,13 +401,13 @@ struct page *split_page_from_secure_pool(struct ion_system_heap *heap, * possible. */ page = alloc_from_secure_pool_order(heap, buffer, 0); - if (page) + if (!IS_ERR(page)) goto got_page; for (i = NUM_ORDERS - 2; i >= 0; i--) { order = orders[i]; page = alloc_from_secure_pool_order(heap, buffer, order); - if (!page) + if (IS_ERR(page)) continue; split_page(page, order); @@ -415,7 +417,7 @@ struct page *split_page_from_secure_pool(struct ion_system_heap *heap, * Return the remaining order-0 pages to the pool. * SetPagePrivate flag to mark memory as secure. 
*/ - if (page) { + if (!IS_ERR(page)) { for (j = 1; j < (1 << order); j++) { SetPagePrivate(page + j); free_buffer_page(heap, buffer, page + j, 0); @@ -444,7 +446,7 @@ int ion_secure_page_pool_shrink( while (freed < nr_to_scan) { page = ion_page_pool_alloc_pool_only(pool); - if (!page) + if (IS_ERR(page)) break; list_add(&page->lru, &pages); freed += (1 << order); @@ -463,7 +465,10 @@ int ion_secure_page_pool_shrink( sg = sg_next(sg); } - if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true)) + ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true); + if (ret == -EADDRNOTAVAIL) + goto out3; + else if (ret < 0) goto out2; list_for_each_entry_safe(page, tmp, &pages, lru) { @@ -474,6 +479,8 @@ int ion_secure_page_pool_shrink( sg_free_table(&sgt); return freed; +out2: + sg_free_table(&sgt); out1: /* Restore pages to secure pool */ list_for_each_entry_safe(page, tmp, &pages, lru) { @@ -481,7 +488,7 @@ int ion_secure_page_pool_shrink( ion_page_pool_free(pool, page); } return 0; -out2: +out3: /* * The security state of the pages is unknown after a failure; * They can neither be added back to the secure pool nor buddy system. diff --git a/drivers/thermal/qcom/cx_ipeak_cdev.c b/drivers/thermal/qcom/cx_ipeak_cdev.c index 0658e02d7be1305210ee95b93acbac1e6839daeb..2b6a4fb9df912acc3367ddb8f6ab7ae0a270c2b4 100644 --- a/drivers/thermal/qcom/cx_ipeak_cdev.c +++ b/drivers/thermal/qcom/cx_ipeak_cdev.c @@ -35,27 +35,49 @@ struct cxip_lm_cooling_device { struct thermal_cooling_device *cool_dev; char cdev_name[THERMAL_NAME_LENGTH]; void *cx_ip_reg_base; + unsigned int therm_clnt; + unsigned int *bypass_clnts; + unsigned int bypass_clnt_cnt; bool state; }; -static void cxip_lm_therm_vote_apply(void *reg_base, bool vote) +static void cxip_lm_therm_vote_apply(struct cxip_lm_cooling_device *cxip_dev, + bool vote) { - writel_relaxed(CXIP_LM_THERM_VOTE_VAL, - reg_base + - (vote ? 
CXIP_LM_VOTE_SET : CXIP_LM_VOTE_CLEAR)); + int vote_offset = 0, val = 0, sts_offset = 0; + + if (!cxip_dev->therm_clnt) { + vote_offset = vote ? CXIP_LM_VOTE_SET : CXIP_LM_VOTE_CLEAR; + val = CXIP_LM_THERM_VOTE_VAL; + sts_offset = CXIP_LM_VOTE_STATUS; + } else { + vote_offset = cxip_dev->therm_clnt; + val = vote ? 0x1 : 0x0; + sts_offset = vote_offset; + } - pr_debug("%s vote for cxip_lm. Agg.vote:0x%x\n", + writel_relaxed(val, cxip_dev->cx_ip_reg_base + vote_offset); + pr_debug("%s vote for cxip_lm. vote:0x%x\n", vote ? "Applied" : "Cleared", - readl_relaxed(reg_base + CXIP_LM_VOTE_STATUS)); + readl_relaxed(cxip_dev->cx_ip_reg_base + sts_offset)); } -static void cxip_lm_initialize_cxip_hw(void *reg_base) +static void cxip_lm_initialize_cxip_hw(struct cxip_lm_cooling_device *cxip_dev) { - /* Enable CXIP LM HW */ - writel_relaxed(CXIP_LM_FEATURE_EN_VAL, reg_base + CXIP_LM_FEATURE_EN); + int i = 0; /* Set CXIP LM proxy vote for clients who are not participating */ - writel_relaxed(CXIP_LM_BYPASS_VAL, reg_base + CXIP_LM_BYPASS); + if (cxip_dev->bypass_clnt_cnt) + for (i = 0; i < cxip_dev->bypass_clnt_cnt; i++) + writel_relaxed(0x1, cxip_dev->cx_ip_reg_base + + cxip_dev->bypass_clnts[i]); + else if (!cxip_dev->therm_clnt) + writel_relaxed(CXIP_LM_BYPASS_VAL, + cxip_dev->cx_ip_reg_base + CXIP_LM_BYPASS); + + /* Enable CXIP LM HW */ + writel_relaxed(CXIP_LM_FEATURE_EN_VAL, cxip_dev->cx_ip_reg_base + + CXIP_LM_FEATURE_EN); } static int cxip_lm_get_max_state(struct thermal_cooling_device *cdev, @@ -78,7 +100,7 @@ static int cxip_lm_set_cur_state(struct thermal_cooling_device *cdev, if (cxip_dev->state == state) return 0; - cxip_lm_therm_vote_apply(cxip_dev->cx_ip_reg_base, state); + cxip_lm_therm_vote_apply(cxip_dev, state); cxip_dev->state = state; return ret; @@ -117,6 +139,49 @@ static int cxip_lm_cdev_remove(struct platform_device *pdev) return 0; } +static int cxip_lm_get_devicetree_data(struct platform_device *pdev, + struct cxip_lm_cooling_device *cxip_dev, + struct 
device_node *np) +{ + int ret = 0; + + ret = of_property_read_u32(np, "qcom,thermal-client-offset", + &cxip_dev->therm_clnt); + if (ret) { + dev_dbg(&pdev->dev, + "error for qcom,thermal-client-offset. ret:%d\n", + ret); + cxip_dev->therm_clnt = 0; + ret = 0; + return ret; + } + + ret = of_property_count_u32_elems(np, "qcom,bypass-client-list"); + if (ret <= 0) { + dev_dbg(&pdev->dev, "Invalid number of clients err:%d\n", ret); + ret = 0; + return ret; + } + cxip_dev->bypass_clnt_cnt = ret; + + cxip_dev->bypass_clnts = devm_kcalloc(&pdev->dev, + cxip_dev->bypass_clnt_cnt, + sizeof(*cxip_dev->bypass_clnts), GFP_KERNEL); + if (!cxip_dev->bypass_clnts) + return -ENOMEM; + + ret = of_property_read_u32_array(np, "qcom,bypass-client-list", + cxip_dev->bypass_clnts, cxip_dev->bypass_clnt_cnt); + if (ret) { + dev_dbg(&pdev->dev, "bypass client list err:%d, cnt:%d\n", + ret, cxip_dev->bypass_clnt_cnt); + cxip_dev->bypass_clnt_cnt = 0; + ret = 0; + } + + return ret; +} + static int cxip_lm_cdev_probe(struct platform_device *pdev) { struct cxip_lm_cooling_device *cxip_dev = NULL; @@ -135,6 +200,10 @@ static int cxip_lm_cdev_probe(struct platform_device *pdev) if (!cxip_dev) return -ENOMEM; + ret = cxip_lm_get_devicetree_data(pdev, cxip_dev, np); + if (ret) + return ret; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, @@ -153,8 +222,7 @@ static int cxip_lm_cdev_probe(struct platform_device *pdev) /* Set thermal vote till we get first vote from TF */ cxip_dev->state = true; - cxip_lm_therm_vote_apply(cxip_dev->cx_ip_reg_base, - cxip_dev->state); + cxip_lm_therm_vote_apply(cxip_dev, cxip_dev->state); strlcpy(cxip_dev->cdev_name, np->name, THERMAL_NAME_LENGTH); cxip_dev->cool_dev = thermal_of_cooling_device_register( diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index 5873d944fd3405181a65f17d84e9ced0b736646b..f172a137da32871d5a25e066447d58dd556589e0 100644 --- 
a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -138,6 +138,28 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { "cpu-1-3-usr"}, .logic = VIRT_MAXIMUM, }, + { + .virt_zone_name = "cpu-0-max-step", + .num_sensors = 7, + .sensor_names = {"cpu-0-0-usr", + "cpu-0-1-usr", + "cpu-0-2-usr", + "cpu-0-3-usr", + "cpu-0-4-usr", + "cpu-0-5-usr", + "cpuss-0-usr"}, + .logic = VIRT_MAXIMUM, + }, + { + .virt_zone_name = "cpu-1-max-step", + .num_sensors = 5, + .sensor_names = {"cpu-1-0-usr", + "cpu-1-1-usr", + "cpu-1-2-usr", + "cpu-1-3-usr", + "cpuss-1-usr"}, + .logic = VIRT_MAXIMUM, + }, }; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 3015789265dd05d960ae5637d4b8ed7d0e410fb5..27c5b2b46b8dc0419a89fed9bd81b627a1d67eb5 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -260,7 +260,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, long rate; int ret; - if (IS_ERR(d->clk) || !old) + if (IS_ERR(d->clk)) goto out; clk_disable_unprepare(d->clk); @@ -672,6 +672,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { { "APMC0D08", 0}, { "AMD0020", 0 }, { "AMDI0020", 0 }, + { "BRCM2032", 0 }, { "HISI0031", 0 }, { }, }; diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index e0aa5f03004cc6c1754684716835acbd1f7f19b9..411b4b03457bbbd7bac5c1f2ed542e81ebda47e8 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -436,7 +436,11 @@ static irqreturn_t exar_misc_handler(int irq, void *data) struct exar8250 *priv = data; /* Clear all PCI interrupts by reading INT0. 
No effect on IIR */ - ioread8(priv->virt + UART_EXAR_INT0); + readb(priv->virt + UART_EXAR_INT0); + + /* Clear INT0 for Expansion Interface slave ports, too */ + if (priv->board->num_ports > 8) + readb(priv->virt + 0x2000 + UART_EXAR_INT0); return IRQ_HANDLED; } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index be456ea27ab27985865c8dccbdf7be22a23e7b87..ecf3d631bc09f5670a1748f8b8a89c65b6ba4132 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -94,8 +94,7 @@ static const struct serial8250_config uart_config[] = { .name = "16550A", .fifo_size = 16, .tx_loadsz = 16, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | - UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .rxtrig_bytes = {1, 4, 8, 14}, .flags = UART_CAP_FIFO, }, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 86c17b7ba91f99e1edf030c4340609587b635bdc..7f131e901d7d791859543e765aa9c58b465ce616 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -130,6 +130,7 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); reg |= DWC3_GCTL_PRTCAPDIR(mode); dwc3_writel(dwc->regs, DWC3_GCTL, reg); + dwc->current_dr_role = mode; } void dwc3_en_sleep_mode(struct dwc3 *dwc) @@ -1536,8 +1537,18 @@ static int dwc3_resume(struct device *dev) int ret; /* Check if platform glue driver handling PM, if not then handle here */ - if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0)) + if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT, 0)) { + /* + * If the core was in host mode during suspend, then set the + * runtime PM state as active to reflect actual state of device + * which is now out of LPM. This allows runtime_suspend later. 
+ */ + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && + dwc->host_poweroff_in_pm_suspend) + goto runtime_set_active; + return 0; + } pinctrl_pm_select_default_state(dev); @@ -1545,6 +1556,7 @@ static int dwc3_resume(struct device *dev) if (ret) return ret; +runtime_set_active: pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 3f791a8760df565a72db755b946659aed9648560..fb6cd97214a32cb6a7deabf718dea4fe8e3e7c76 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -254,6 +254,7 @@ #define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28) #define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27) #define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24) +#define DWC3_GUSB3PIPECTL_DISRXDETU3 BIT(22) #define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19) #define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7) #define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1) @@ -854,6 +855,7 @@ struct dwc3_scratchpad_array { #define DWC3_GSI_EVT_BUF_SETUP 11 #define DWC3_GSI_EVT_BUF_CLEANUP 12 #define DWC3_GSI_EVT_BUF_FREE 13 +#define DWC3_CONTROLLER_NOTIFY_CLEAR_DB 14 #define MAX_INTR_STATS 10 @@ -1189,6 +1191,12 @@ struct dwc3 { unsigned int vbus_active:1; /* Indicate if software connect was issued by the usb_gadget_driver */ unsigned int softconnect:1; + /* + * If true, PM suspend allowed irrespective of host runtimePM state + * and core will power collapse. This also leads to reset-resume of + * connected devices on PM resume. 
+ */ + bool host_poweroff_in_pm_suspend; }; #define work_to_dwc(w) (container_of((w), struct dwc3, drd_work)) diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 31639315c9d62b41892e4173498d66edafd7a6b8..75b96d398142f2725de0344548357ec57646b20d 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -181,6 +181,13 @@ static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = { {"ss_phy_irq", 0}, }; +static const char * const gsi_op_strings[] = { + "EP_CONFIG", "START_XFER", "STORE_DBL_INFO", + "ENABLE_GSI", "UPDATE_XFER", "RING_DB", + "END_XFER", "GET_CH_INFO", "GET_XFER_IDX", "PREPARE_TRBS", + "FREE_TRBS", "SET_CLR_BLOCK_DBL", "CHECK_FOR_SUSP", + "EP_DISABLE" }; + struct dwc3_msm; struct extcon_nb { @@ -189,7 +196,7 @@ struct extcon_nb { int idx; struct notifier_block vbus_nb; struct notifier_block id_nb; - struct notifier_block host_restart_nb; + struct notifier_block blocking_sync_nb; }; /* Input bits to state machine (mdwc->inputs) */ @@ -238,6 +245,7 @@ struct dwc3_msm { struct work_struct restart_usb_work; bool in_restart; struct workqueue_struct *dwc3_wq; + struct workqueue_struct *sm_usb_wq; struct delayed_work sm_work; unsigned long inputs; unsigned int max_power; @@ -302,7 +310,7 @@ static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc); static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA); static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, unsigned int value); -static int dwc3_restart_usb_host_mode(struct notifier_block *nb, +static int dwc3_usb_blocking_sync(struct notifier_block *nb, unsigned long event, void *ptr); /** @@ -1379,6 +1387,13 @@ static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend) return true; } +static inline const char *gsi_op_to_string(unsigned int op) +{ + if (op < ARRAY_SIZE(gsi_op_strings)) + return gsi_op_strings[op]; + + return "Invalid"; +} /** * Performs GSI operations or GSI EP related operations. 
@@ -1402,41 +1417,36 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, bool block_db, f_suspend; unsigned long flags; + dbg_log_string("%s(%d):%s", ep->name, ep->ep_num, gsi_op_to_string(op)); + switch (op) { case GSI_EP_OP_PREPARE_TRBS: request = (struct usb_gsi_request *)op_data; - dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name); ret = gsi_prepare_trbs(ep, request); break; case GSI_EP_OP_FREE_TRBS: - dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name); request = (struct usb_gsi_request *)op_data; gsi_free_trbs(ep, request); break; case GSI_EP_OP_CONFIG: request = (struct usb_gsi_request *)op_data; - dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name); spin_lock_irqsave(&dwc->lock, flags); gsi_configure_ep(ep, request); spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_STARTXFER: - dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name); spin_lock_irqsave(&dwc->lock, flags); ret = gsi_startxfer_for_ep(ep); spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_GET_XFER_IDX: - dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name); ret = gsi_get_xfer_index(ep); break; case GSI_EP_OP_STORE_DBL_INFO: - dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n"); request = (struct usb_gsi_request *)op_data; gsi_store_ringbase_dbl_info(ep, request); break; case GSI_EP_OP_ENABLE_GSI: - dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n"); gsi_enable(ep); break; case GSI_EP_OP_GET_CH_INFO: @@ -1445,36 +1455,29 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, break; case GSI_EP_OP_RING_DB: request = (struct usb_gsi_request *)op_data; - dbg_print(0xFF, "RING_DB", 0, ep->name); gsi_ring_db(ep, request); break; case GSI_EP_OP_UPDATEXFER: request = (struct usb_gsi_request *)op_data; - dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n"); spin_lock_irqsave(&dwc->lock, flags); ret = gsi_updatexfer_for_ep(ep, request); spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_ENDXFER: request = (struct usb_gsi_request *)op_data; - dev_dbg(mdwc->dev, 
"EP_OP_ENDXFER for %s\n", ep->name); spin_lock_irqsave(&dwc->lock, flags); gsi_endxfer_for_ep(ep); spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_SET_CLR_BLOCK_DBL: block_db = *((bool *)op_data); - dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n", - block_db); gsi_set_clear_dbell(ep, block_db); break; case GSI_EP_OP_CHECK_FOR_SUSPEND: - dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n"); f_suspend = *((bool *)op_data); ret = gsi_check_ready_to_suspend(ep, f_suspend); break; case GSI_EP_OP_DISABLE: - dev_dbg(mdwc->dev, "EP_OP_DISABLE\n"); ret = ep->ops->disable(ep); break; default: @@ -1705,7 +1708,7 @@ static int msm_dwc3_usbdev_notify(struct notifier_block *self, } mdwc->hc_died = true; - schedule_delayed_work(&mdwc->sm_work, 0); + queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0); return 0; } @@ -1999,6 +2002,12 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, case DWC3_CONTROLLER_NOTIFY_DISABLE_UPDXFER: dwc3_msm_dbm_disable_updxfer(dwc, value); break; + case DWC3_CONTROLLER_NOTIFY_CLEAR_DB: + dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_CLEAR_DB\n"); + dwc3_msm_write_reg_field(mdwc->base, + GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]), + BLOCK_GSI_WR_GO_MASK, true); + break; default: dev_dbg(mdwc->dev, "unknown dwc3 event\n"); break; @@ -2060,15 +2069,34 @@ static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc) if (ret) dev_err(mdwc->dev, "%s: dwc3_core init failed (%d)\n", __func__, ret); + + /* Get initial P3 status and enable IN_P3 event */ + if (dwc3_is_usb31(dwc)) + val = dwc3_msm_read_reg_field(mdwc->base, + DWC31_LINK_GDBGLTSSM, + DWC3_GDBGLTSSM_LINKSTATE_MASK); + else + val = dwc3_msm_read_reg_field(mdwc->base, + DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK); + atomic_set(&mdwc->in_p3, val == DWC3_LINK_STATE_U3); + dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG, + PWR_EVNT_POWERDOWN_IN_P3_MASK, 1); + + /* Set the core in host mode if it was in host mode during pm_suspend */ + if 
(mdwc->in_host_mode) { + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_en_sleep_mode(dwc); + } + } -static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc) +static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc, bool ignore_p3_state) { unsigned long timeout; u32 reg = 0; - if ((mdwc->in_host_mode || mdwc->in_device_mode) - && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) { + if (!ignore_p3_state && ((mdwc->in_host_mode || mdwc->in_device_mode) + && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart)) { if (!atomic_read(&mdwc->in_p3)) { dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n"); return -EBUSY; @@ -2267,7 +2295,7 @@ static int dwc3_msm_update_bus_bw(struct dwc3_msm *mdwc, enum bus_vote bv) return ret; } -static int dwc3_msm_suspend(struct dwc3_msm *mdwc) +static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse) { int ret; struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); @@ -2329,7 +2357,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) return -EBUSY; } - ret = dwc3_msm_prepare_suspend(mdwc); + ret = dwc3_msm_prepare_suspend(mdwc, force_power_collapse); if (ret) { mutex_unlock(&mdwc->suspend_resume_mutex); return ret; @@ -2363,6 +2391,12 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) (!mdwc->use_pwr_event_for_wakeup || no_active_ss); /* Suspend SS PHY */ if (can_suspend_ssphy) { + if (mdwc->in_host_mode) { + u32 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + + reg |= DWC3_GUSB3PIPECTL_DISRXDETU3; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + } /* indicate phy about SS mode */ if (dwc3_msm_is_superspeed(mdwc)) mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE; @@ -2401,8 +2435,8 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) clk_disable_unprepare(mdwc->xo_clk); /* Perform controller power collapse */ - if (!mdwc->in_host_mode && (!mdwc->in_device_mode || - mdwc->in_restart)) { + if (!(mdwc->in_host_mode || mdwc->in_device_mode) || + mdwc->in_restart || force_power_collapse) { 
mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE; dev_dbg(mdwc->dev, "%s: power collapse\n", __func__); dwc3_msm_config_gdsc(mdwc, 0); @@ -2547,6 +2581,13 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) usb_phy_set_suspend(mdwc->ss_phy, 0); mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE; mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND; + + if (mdwc->in_host_mode) { + u32 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + + reg &= ~DWC3_GUSB3PIPECTL_DISRXDETU3; + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + } } mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE); @@ -2555,8 +2596,6 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) /* Recover from controller power collapse */ if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) { - u32 tmp; - if (mdwc->iommu_map) { ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map); @@ -2571,18 +2610,6 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) dwc3_msm_power_collapse_por(mdwc); - /* Get initial P3 status and enable IN_P3 event */ - if (dwc3_is_usb31(dwc)) - tmp = dwc3_msm_read_reg_field(mdwc->base, - DWC31_LINK_GDBGLTSSM, - DWC3_GDBGLTSSM_LINKSTATE_MASK); - else - tmp = dwc3_msm_read_reg_field(mdwc->base, - DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK); - atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3); - dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG, - PWR_EVNT_POWERDOWN_IN_P3_MASK, 1); - mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE; } @@ -2665,7 +2692,7 @@ static void dwc3_ext_event_notify(struct dwc3_msm *mdwc) clear_bit(B_SUSPEND, &mdwc->inputs); } - schedule_delayed_work(&mdwc->sm_work, 0); + queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, 0); } static void dwc3_resume_work(struct work_struct *w) @@ -2718,6 +2745,13 @@ static void dwc3_resume_work(struct work_struct *w) ORIENTATION_CC2 : ORIENTATION_CC1; dbg_event(0xFF, "cc_state", mdwc->typec_orientation); + + ret = extcon_get_property(edev, extcon_id, + EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT, &val); + if (!ret) + dwc->gadget.is_selfpowered = 
val.intval; + else + dwc->gadget.is_selfpowered = 0; } /* @@ -3085,10 +3119,10 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc) if (ret < 0) check_id_state = false; - mdwc->extcon[idx].host_restart_nb.notifier_call = - dwc3_restart_usb_host_mode; + mdwc->extcon[idx].blocking_sync_nb.notifier_call = + dwc3_usb_blocking_sync; extcon_register_blocking_notifier(edev, EXTCON_USB_HOST, - &mdwc->extcon[idx].host_restart_nb); + &mdwc->extcon[idx].blocking_sync_nb); /* Update initial VBUS/ID state */ if (check_vbus_state && extcon_get_state(edev, EXTCON_USB)) @@ -3231,6 +3265,8 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr, req_speed = USB_SPEED_HIGH; else if (sysfs_streq(buf, "super")) req_speed = USB_SPEED_SUPER; + else if (sysfs_streq(buf, "ssp")) + req_speed = USB_SPEED_SUPER_PLUS; else return -EINVAL; @@ -3363,6 +3399,18 @@ static int dwc3_msm_probe(struct platform_device *pdev) return -ENOMEM; } + /* + * Create freezable workqueue for sm_work so that it gets scheduled only + * after pm_resume has happened completely. This helps in avoiding race + * conditions between xhci_plat_resume and xhci_runtime_resume; and also + * between hcd disconnect and xhci_resume. 
+ */ + mdwc->sm_usb_wq = create_freezable_workqueue("k_sm_usb"); + if (!mdwc->sm_usb_wq) { + destroy_workqueue(mdwc->dwc3_wq); + return -ENOMEM; + } + /* Get all clks and gdsc reference */ ret = dwc3_msm_get_clk_gdsc(mdwc); if (ret) { @@ -3614,6 +3662,12 @@ static int dwc3_msm_probe(struct platform_device *pdev) mdwc->pm_qos_latency = 0; } + if (of_property_read_bool(node, "qcom,host-poweroff-in-pm-suspend")) { + dwc->host_poweroff_in_pm_suspend = true; + dev_dbg(mdwc->dev, "%s: Core power collapse on host PM suspend\n", + __func__); + } + mutex_init(&mdwc->suspend_resume_mutex); if (of_property_read_bool(node, "extcon")) { @@ -3621,13 +3675,15 @@ static int dwc3_msm_probe(struct platform_device *pdev) if (ret) goto put_dwc3; } else { - if (dwc->dr_mode == USB_DR_MODE_OTG || - dwc->dr_mode == USB_DR_MODE_PERIPHERAL) { - dev_dbg(mdwc->dev, "%s: no extcon, simulate vbus connect\n", + if ((dwc->dr_mode == USB_DR_MODE_OTG && + !of_property_read_bool(node, "qcom,default-mode-host")) || + dwc->dr_mode == USB_DR_MODE_PERIPHERAL) { + dev_dbg(mdwc->dev, "%s: no extcon, start peripheral mode\n", __func__); mdwc->vbus_active = true; - } else if (dwc->dr_mode == USB_DR_MODE_HOST) { - dev_dbg(mdwc->dev, "DWC3 in host only mode\n"); + } else { + dev_dbg(mdwc->dev, "%s: no extcon, start host mode\n", + __func__); mdwc->id_state = DWC3_ID_GROUND; } @@ -3652,6 +3708,7 @@ static int dwc3_msm_probe(struct platform_device *pdev) } of_platform_depopulate(&pdev->dev); err: + destroy_workqueue(mdwc->sm_usb_wq); destroy_workqueue(mdwc->dwc3_wq); return ret; } @@ -3732,6 +3789,9 @@ static int dwc3_msm_remove(struct platform_device *pdev) arm_iommu_release_mapping(mdwc->iommu_map); } + destroy_workqueue(mdwc->sm_usb_wq); + destroy_workqueue(mdwc->dwc3_wq); + return 0; } @@ -4087,58 +4147,46 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on) return 0; } -/* speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */ -static int dwc3_restart_usb_host_mode(struct notifier_block *nb, 
+static int dwc3_usb_blocking_sync(struct notifier_block *nb, unsigned long event, void *ptr) { struct dwc3 *dwc; struct extcon_dev *edev = ptr; struct extcon_nb *enb = container_of(nb, struct extcon_nb, - host_restart_nb); + blocking_sync_nb); struct dwc3_msm *mdwc = enb->mdwc; - int ret = -EINVAL, usb_speed; + int ret = 0; if (!edev || !mdwc) return NOTIFY_DONE; dwc = platform_get_drvdata(mdwc->dwc3); - usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER); - if (dwc->maximum_speed == usb_speed) - return 0; - - dbg_event(0xFF, "fw_restarthost", 0); + dbg_event(0xFF, "fw_blocksync", 0); + flush_work(&mdwc->resume_work); flush_delayed_work(&mdwc->sm_work); - if (!mdwc->in_host_mode) - goto err; + if (!mdwc->in_host_mode && !mdwc->in_device_mode) { + dbg_event(0xFF, "lpm_state", atomic_read(&dwc->in_lpm)); - dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed); - ret = dwc3_otg_start_host(mdwc, 0); - if (ret) - goto err; + /* + * stop host mode functionality performs autosuspend with mdwc + * device, and it may take sometime to call PM runtime suspend. + * Hence call pm_runtime_suspend() API to invoke PM runtime + * suspend immediately to put USB controller and PHYs into + * suspend. + */ + ret = pm_runtime_suspend(mdwc->dev); + dbg_event(0xFF, "pm_runtime_sus", ret); + + /* + * If mdwc device is already suspended, pm_runtime_suspend() API + * returns 1, which is not error. Overwrite with zero if it is. + */ + if (ret > 0) + ret = 0; + } - dbg_event(0xFF, "USB_lpm_state", atomic_read(&dwc->in_lpm)); - /* - * stop host mode functionality performs autosuspend with mdwc - * device, and it may take sometime to call PM runtime suspend. - * Hence call pm_runtime_suspend() API to invoke PM runtime - * suspend immediately to put USB controller and PHYs into suspend. - */ - ret = pm_runtime_suspend(mdwc->dev); - /* - * If mdwc device is already suspended, pm_runtime_suspend() API - * returns 1, which is not error. Overwrite with zero if it is. 
- */ - if (ret > 0) - ret = 0; - dbg_event(0xFF, "pm_runtime_sus", ret); - - dwc->maximum_speed = usb_speed; - mdwc->otg_state = OTG_STATE_B_IDLE; - schedule_delayed_work(&mdwc->sm_work, 0); - dbg_event(0xFF, "complete_host_change", dwc->maximum_speed); -err: return ret; } @@ -4384,7 +4432,7 @@ static void dwc3_otg_sm_work(struct work_struct *w) } if (work) - schedule_delayed_work(&mdwc->sm_work, delay); + queue_delayed_work(mdwc->sm_usb_wq, &mdwc->sm_work, delay); ret: return; @@ -4401,12 +4449,36 @@ static int dwc3_msm_pm_suspend(struct device *dev) dbg_event(0xFF, "PM Sus", 0); flush_workqueue(mdwc->dwc3_wq); - if (!atomic_read(&dwc->in_lpm)) { - dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n"); - return -EBUSY; + + /* + * Check if pm_suspend can proceed irrespective of runtimePM state of + * host. + */ + if (!dwc->host_poweroff_in_pm_suspend || !mdwc->in_host_mode) { + if (!atomic_read(&dwc->in_lpm)) { + dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n"); + return -EBUSY; + } + + atomic_set(&mdwc->pm_suspended, 1); + + return 0; } - ret = dwc3_msm_suspend(mdwc); + /* + * PHYs also need to be power collapsed, so call notify_disconnect + * before suspend to ensure it. + */ + usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH); + mdwc->hs_phy->flags &= ~PHY_HOST_MODE; + usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER); + mdwc->ss_phy->flags &= ~PHY_HOST_MODE; + + /* + * Power collapse the core. Hence call dwc3_msm_suspend with + * 'force_power_collapse' set to 'true'. 
+ */ + ret = dwc3_msm_suspend(mdwc, true); if (!ret) atomic_set(&mdwc->pm_suspended, 1); @@ -4425,6 +4497,28 @@ static int dwc3_msm_pm_resume(struct device *dev) flush_workqueue(mdwc->dwc3_wq); atomic_set(&mdwc->pm_suspended, 0); + if (!dwc->host_poweroff_in_pm_suspend || !mdwc->in_host_mode) { + /* kick in otg state machine */ + queue_work(mdwc->dwc3_wq, &mdwc->resume_work); + + return 0; + } + + /* Resume dwc to avoid unclocked access by xhci_plat_resume */ + dwc3_msm_resume(mdwc); + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + /* Restore PHY flags if hibernated in host mode */ + mdwc->hs_phy->flags |= PHY_HOST_MODE; + usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH); + if (dwc->maximum_speed >= USB_SPEED_SUPER) { + mdwc->ss_phy->flags |= PHY_HOST_MODE; + usb_phy_notify_connect(mdwc->ss_phy, + USB_SPEED_SUPER); + } + /* kick in otg state machine */ queue_work(mdwc->dwc3_wq, &mdwc->resume_work); @@ -4452,7 +4546,7 @@ static int dwc3_msm_runtime_suspend(struct device *dev) dev_dbg(dev, "DWC3-msm runtime suspend\n"); dbg_event(0xFF, "RT Sus", 0); - return dwc3_msm_suspend(mdwc); + return dwc3_msm_suspend(mdwc, false); } static int dwc3_msm_runtime_resume(struct device *dev) diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 772c5b4e75fa3f542726bdd40f7a6214849ac36b..5ef1377d441a26f9aeb844e669ed5d56b11dbd08 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -419,9 +419,23 @@ static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state, (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) return -EINVAL; - if (dwc->usb3_u1u2_disable && !enable_dwc3_u1u2) + /* Ignore all other checks if u1/u2 is enabled from user */ + if (enable_dwc3_u1u2) + goto enable_u1; + + /* + * STAR: "9001276244: LFPS Handshake Interoperability Issues" + * has two part workaround, first part is to disable u1/u2 + * in case of SSP to avoid interoperability issues. 
+ */ + if (dwc->revision == DWC3_USB31_REVISION_170A && + (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + if (dwc->usb3_u1u2_disable) return -EINVAL; +enable_u1: reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (set) reg |= DWC3_DCTL_INITU1ENA; @@ -444,9 +458,23 @@ static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state, (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) return -EINVAL; - if (dwc->usb3_u1u2_disable && !enable_dwc3_u1u2) + /* Ignore all other checks if u1/u2 is enabled from user */ + if (enable_dwc3_u1u2) + goto enable_u2; + + /* + * STAR: "9001276244: LFPS Handshake Interoperability Issues" + * has two part workaround, first part is to disable u1/u2 + * in case of SSP to avoid interoperability issues. + */ + if (dwc->revision == DWC3_USB31_REVISION_170A && + (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + if (dwc->usb3_u1u2_disable) return -EINVAL; +enable_u2: reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (set) reg |= DWC3_DCTL_INITU2ENA; @@ -714,16 +742,33 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED); - if (!dwc->usb3_u1u2_disable || enable_dwc3_u1u2) { - /* - * Enable transition to U1/U2 state when - * nothing is pending from application. - */ - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - reg |= (DWC3_DCTL_ACCEPTU1ENA | - DWC3_DCTL_ACCEPTU2ENA); - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - } + /* + * Ignore all other checks if u1/u2 is enabled + * from user + */ + if (enable_dwc3_u1u2) + goto enable_u1u2; + /* + * STAR:"9001276244: LFPS Handshake Interoperability + * Issues" has two part workaround, first part is to + * disable u1/u2 in case of SSP to avoid + * interoperability issues. 
+ */ + if (dwc->revision == DWC3_USB31_REVISION_170A && + (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) + break; + + if (dwc->usb3_u1u2_disable) + break; +enable_u1u2: + /* + * Enable transition to U1/U2 state when + * nothing is pending from application. + */ + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + reg |= (DWC3_DCTL_ACCEPTU1ENA | + DWC3_DCTL_ACCEPTU2ENA); + dwc3_writel(dwc->regs, DWC3_DCTL, reg); } break; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 06c53abc766799a6e83bb6aa625d40add678e5a8..9ca5bfb80142f8a35fcb066788e6302054e57812 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -846,6 +846,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_request *req; + dbg_log_string("START for %s(%d)", dep->name, dep->number); dwc3_stop_active_transfer(dwc, dep->number, true); /* - giveback all requests to gadget driver */ @@ -860,12 +861,14 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) dwc3_gadget_giveback(dep, req, -ESHUTDOWN); } + dbg_log_string("DONE for %s(%d)", dep->name, dep->number); } static void dwc3_stop_active_transfers(struct dwc3 *dwc) { u32 epnum; + dbg_log_string("START"); for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { struct dwc3_ep *dep; @@ -876,8 +879,13 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc) if (!(dep->flags & DWC3_EP_ENABLED)) continue; + if (dep->endpoint.ep_type == EP_TYPE_GSI && dep->direction) + dwc3_notify_event(dwc, + DWC3_CONTROLLER_NOTIFY_CLEAR_DB, 0); + dwc3_remove_requests(dwc, dep); } + dbg_log_string("DONE"); } /** @@ -1178,6 +1186,9 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) { u8 tmp = index; + if (!dep->trb_pool) + return NULL; + if (!tmp) tmp = DWC3_TRB_NUM - 1; @@ -1198,7 +1209,7 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) */ if (dep->trb_enqueue == dep->trb_dequeue) { tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); - if (tmp->ctrl & 
DWC3_TRB_CTRL_HWO) + if (!tmp || tmp->ctrl & DWC3_TRB_CTRL_HWO) return 0; return DWC3_TRB_NUM - 1; @@ -1716,7 +1727,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) else trb = &dwc->ep0_trb[dep->trb_enqueue]; - transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; + if (trb) + transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO; + else + transfer_in_flight = false; + started = !list_empty(&dep->started_list); if (!protocol && ((dep->direction && transfer_in_flight) || @@ -2074,7 +2089,7 @@ static int dwc_gadget_func_wakeup(struct usb_gadget *g, int interface_id) if (dwc3_gadget_is_suspended(dwc)) { dev_dbg(dwc->dev, "USB bus is suspended, scheduling wakeup\n"); dwc3_gadget_wakeup(&dwc->gadget); - return -EAGAIN; + return -EACCES; } ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_XMIT_DEV, @@ -2789,15 +2804,16 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, const struct dwc3_event_depevt *event, int status) { - struct dwc3_request *req, *n; + struct dwc3_request *req; struct dwc3_trb *trb; bool ioc = false; int ret = 0; - list_for_each_entry_safe(req, n, &dep->started_list, list) { + while (!list_empty(&dep->started_list)) { unsigned length; int chain; + req = next_request(&dep->started_list); length = req->request.length; chain = req->num_pending_sgs > 0; if (chain) { @@ -3134,6 +3150,8 @@ void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep->flags |= DWC3_EP_END_TRANSFER_PENDING; udelay(100); } + dbg_log_string("%s(%d): endxfer ret:%d", + dep->name, dep->number, ret); } static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 3d8dc08c76b7dacd630a6fa9707ed08266924d3a..762f67e4df241309fd0673f33179900a5aeb0b66 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -134,6 +134,9 @@ int dwc3_host_init(struct dwc3 *dwc) if
(dwc->revision <= DWC3_REVISION_300A) props[prop_idx++].name = "quirk-broken-port-ped"; + if (dwc->host_poweroff_in_pm_suspend) + props[prop_idx++].name = "host-poweroff-in-pm-suspend"; + if (prop_idx) { ret = platform_device_add_properties(xhci, props); if (ret) { diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 87ecf278d42f7889a6ec2702b473f557b7bf108a..4df3b587a3777e701676a9b67b552decb323ba67 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -150,6 +150,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { + struct usb_composite_dev *cdev; struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; @@ -161,6 +162,8 @@ int config_ep_by_speed(struct usb_gadget *g, if (!g || !f || !_ep) return -EIO; + cdev = get_gadget_data(g); + /* select desired speed */ switch (g->speed) { case USB_SPEED_SUPER_PLUS: @@ -186,6 +189,13 @@ int config_ep_by_speed(struct usb_gadget *g, default: speed_desc = f->fs_descriptors; } + + if (!speed_desc) { + DBG(cdev, "%s desc not present for function %s\n", + usb_speed_string(g->speed), f->name); + return -EIO; + } + /* find descriptors */ for_each_ep_desc(speed_desc, d_spd) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; @@ -228,12 +238,9 @@ int config_ep_by_speed(struct usb_gadget *g, _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: - if (comp_desc->bMaxBurst != 0) { - struct usb_composite_dev *cdev; - - cdev = get_gadget_data(g); + if (comp_desc->bMaxBurst != 0) ERROR(cdev, "ep0 bMaxBurst must be 0\n"); - } + _ep->maxburst = 1; break; } @@ -261,7 +268,7 @@ int usb_add_function(struct usb_configuration *config, { int value = -EINVAL; - DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", + DBG(config->cdev, "adding '%s'/%pK to config '%s'/%pK\n", function->name, function, config->label, config); @@ -304,7 +311,7 @@ int usb_add_function(struct usb_configuration 
*config, done: if (value) - DBG(config->cdev, "adding '%s'/%p --> %d\n", + DBG(config->cdev, "adding '%s'/%pK --> %d\n", function->name, function, value); return value; } @@ -475,11 +482,17 @@ int usb_func_wakeup(struct usb_function *func) spin_lock_irqsave(&func->config->cdev->lock, flags); ret = usb_func_wakeup_int(func); - if (ret == -EAGAIN) { + if (ret == -EACCES) { DBG(func->config->cdev, "Function wakeup for %s could not complete due to suspend state. Delayed until after bus resume.\n", func->name ? func->name : ""); ret = 0; + func->func_wakeup_pending = 1; + } else if (ret == -EAGAIN) { + DBG(func->config->cdev, + "Function wakeup for %s sent.\n", + func->name ? func->name : ""); + ret = 0; } else if (ret < 0 && ret != -ENOTSUPP) { ERROR(func->config->cdev, "Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n", @@ -509,7 +522,12 @@ int usb_func_ep_queue(struct usb_function *func, struct usb_ep *ep, gadget = func->config->cdev->gadget; if (func->func_is_suspended && func->func_wakeup_allowed) { ret = usb_gadget_func_wakeup(gadget, func->intf_id); - if (ret == -EAGAIN) { + if (ret == -EACCES) { + pr_debug("bus suspended func wakeup for %s delayed until bus resume.\n", + func->name ? func->name : ""); + func->func_wakeup_pending = 1; + ret = -EAGAIN; + } else if (ret == -EAGAIN) { pr_debug("bus suspended func wakeup for %s delayed until bus resume.\n", func->name ? func->name : ""); } else if (ret < 0 && ret != -ENOTSUPP) { @@ -574,6 +592,10 @@ static int config_buf(struct usb_configuration *config, c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c->bMaxPower = encode_bMaxPower(speed, config); + if (config->cdev->gadget->is_selfpowered) { + c->bmAttributes |= USB_CONFIG_ATT_SELFPOWER; + c->bMaxPower = 0; + } /* There may be e.g. 
OTG descriptors */ if (config->descriptors) { @@ -935,7 +957,7 @@ static int set_config(struct usb_composite_dev *cdev, result = f->set_alt(f, tmp, 0); if (result < 0) { - DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", + DBG(cdev, "interface %d (%s/%pK) alt 0 --> %d\n", tmp, f->name, f, result); reset_config(cdev); @@ -1013,7 +1035,7 @@ int usb_add_config(struct usb_composite_dev *cdev, if (!bind) goto done; - DBG(cdev, "adding config #%u '%s'/%p\n", + DBG(cdev, "adding config #%u '%s'/%pK\n", config->bConfigurationValue, config->label, config); @@ -1030,7 +1052,7 @@ int usb_add_config(struct usb_composite_dev *cdev, struct usb_function, list); list_del(&f->list); if (f->unbind) { - DBG(cdev, "unbind function '%s'/%p\n", + DBG(cdev, "unbind function '%s'/%pK\n", f->name, f); f->unbind(config, f); /* may free memory for "f" */ @@ -1041,7 +1063,7 @@ int usb_add_config(struct usb_composite_dev *cdev, } else { unsigned i; - DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n", + DBG(cdev, "cfg %d/%pK speeds:%s%s%s%s\n", config->bConfigurationValue, config, config->superspeed_plus ? " superplus" : "", config->superspeed ? 
" super" : "", @@ -1057,7 +1079,7 @@ int usb_add_config(struct usb_composite_dev *cdev, if (!f) continue; - DBG(cdev, " interface %d = %s/%p\n", + DBG(cdev, " interface %d = %s/%pK\n", i, f->name, f); } } @@ -1086,7 +1108,7 @@ static void remove_config(struct usb_composite_dev *cdev, } list_del(&config->list); if (config->unbind) { - DBG(cdev, "unbind config '%s'/%p\n", config->label, config); + DBG(cdev, "unbind config '%s'/%pK\n", config->label, config); config->unbind(config); /* may free memory for "c" */ } @@ -1494,7 +1516,7 @@ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req) else if (cdev->os_desc_req == req) cdev->os_desc_pending = false; else - WARN(1, "unknown request %p\n", req); + WARN(1, "unknown request %pK\n", req); } static int composite_ep0_queue(struct usb_composite_dev *cdev, @@ -1509,7 +1531,7 @@ static int composite_ep0_queue(struct usb_composite_dev *cdev, else if (cdev->os_desc_req == req) cdev->os_desc_pending = true; else - WARN(1, "unknown request %p\n", req); + WARN(1, "unknown request %pK\n", req); } return ret; @@ -2422,22 +2444,24 @@ void composite_resume(struct usb_gadget *gadget) spin_lock_irqsave(&cdev->lock, flags); if (cdev->config) { list_for_each_entry(f, &cdev->config->functions, list) { - ret = usb_func_wakeup_int(f); - if (ret) { - if (ret == -EAGAIN) { - ERROR(f->config->cdev, - "Function wakeup for %s could not complete due to suspend state.\n", - f->name ? f->name : ""); - break; - } else if (ret != -ENOTSUPP) { - ERROR(f->config->cdev, - "Failed to wake function %s from suspend state. ret=%d. Canceling USB request.\n", - f->name ? f->name : "", - ret); + if (f->func_wakeup_pending) { + ret = usb_func_wakeup_int(f); + if (ret) { + if (ret == -EAGAIN) { + ERROR(f->config->cdev, + "Function wakeup for %s could not complete due to suspend state.\n", + f->name ? f->name : ""); + } else if (ret != -ENOTSUPP) { + ERROR(f->config->cdev, + "Failed to wake function %s from suspend state. ret=%d. 
Canceling USB request.\n", + f->name ? f->name : "", + ret); + } } + f->func_wakeup_pending = 0; } - if (f->resume) + if (gadget->speed != USB_SPEED_SUPER && f->resume) f->resume(f); } diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 181faf1f65bc75226c44aba8569077dfffa8aced..19e18a244b0f2aabb90c897ccfd4408a8f850516 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1260,7 +1260,7 @@ static void purge_configs_funcs(struct gadget_info *gi) list_move_tail(&f->list, &cfg->func_list); if (f->unbind) { dev_dbg(&gi->cdev.gadget->dev, - "unbind function '%s'/%p\n", + "unbind function '%s'/%pK\n", f->name, f); f->unbind(c, f); } @@ -1463,7 +1463,7 @@ static void android_work(struct work_struct *data) } if (!uevent_sent) { - pr_info("%s: did not send uevent (%d %d %p)\n", __func__, + pr_info("%s: did not send uevent (%d %d %pK)\n", __func__, gi->connected, gi->sw_connected, cdev->config); } } diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 1242ba78111efa04f1a52b4407ff9817ad3588ef..ca97f6e9ea87901c86828cbf3b3b655754a8c2fd 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -515,7 +515,7 @@ static int create_bulk_endpoints(struct acc_dev *dev, struct usb_ep *ep; int i; - DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + DBG(cdev, "%s dev: %pK\n", __func__, dev); ep = usb_ep_autoconfig(cdev->gadget, in_desc); if (!ep) { @@ -606,7 +606,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf, r = -EIO; goto done; } else { - pr_debug("rx %p queue\n", req); + pr_debug("rx %pK queue\n", req); } /* wait for a request to complete */ @@ -629,7 +629,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf, if (req->actual == 0) goto requeue_req; - pr_debug("rx %p %u\n", req, req->actual); + pr_debug("rx %pK %u\n", req, req->actual); xfer = (req->actual < count) ? 
req->actual : count; r = xfer; if (copy_to_user(buf, req->buf, xfer)) @@ -780,6 +780,9 @@ static const struct file_operations acc_fops = { .read = acc_read, .write = acc_write, .unlocked_ioctl = acc_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = acc_ioctl, +#endif .open = acc_open, .release = acc_release, }; @@ -941,7 +944,7 @@ __acc_function_bind(struct usb_configuration *c, int id; int ret; - DBG(cdev, "acc_function_bind dev: %p\n", dev); + DBG(cdev, "acc_function_bind dev: %pK\n", dev); if (configfs) { if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) { @@ -1120,7 +1123,7 @@ static void acc_hid_work(struct work_struct *data) list_for_each_safe(entry, temp, &new_list) { hid = list_entry(entry, struct acc_hid_dev, list); if (acc_hid_init(hid)) { - pr_err("can't add HID device %p\n", hid); + pr_err("can't add HID device %pK\n", hid); acc_hid_delete(hid); } else { spin_lock_irqsave(&dev->lock, flags); diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 5e3828d9dac7f3af922456d141191ddd0733bbaf..4f2b84749a7d4639593272444dcd93c199afa51e 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -704,7 +704,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) if (acm->notify_req) gs_free_req(acm->notify, acm->notify_req); - ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); + ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status); return status; } diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index 09a1430c84a5edcbccf9b9b536de143164a413ab..dd16710a165a568fac3f6b9ad709904b34fa9733 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -851,7 +851,7 @@ static int usb_cser_alloc_requests(struct usb_ep *ep, struct list_head *head, int i; struct usb_request *req; - pr_debug("ep:%p head:%p num:%d size:%d cb:%p", + pr_debug("ep:%pK head:%p num:%d size:%d cb:%p", ep, head, num, 
size, cb); for (i = 0; i < num; i++) { @@ -901,7 +901,7 @@ static void usb_cser_start_rx(struct f_cdev *port) ret = usb_ep_queue(ep, req, GFP_KERNEL); spin_lock_irqsave(&port->port_lock, flags); if (ret) { - pr_err("port(%d):%p usb ep(%s) queue failed\n", + pr_err("port(%d):%pK usb ep(%s) queue failed\n", port->port_num, port, ep->name); list_add(&req->list, pool); break; @@ -916,7 +916,7 @@ static void usb_cser_read_complete(struct usb_ep *ep, struct usb_request *req) struct f_cdev *port = ep->driver_data; unsigned long flags; - pr_debug("ep:(%p)(%s) port:%p req_status:%d req->actual:%u\n", + pr_debug("ep:(%pK)(%s) port:%p req_status:%d req->actual:%u\n", ep, ep->name, port, req->status, req->actual); if (!port) { pr_err("port is null\n"); @@ -942,7 +942,7 @@ static void usb_cser_write_complete(struct usb_ep *ep, struct usb_request *req) unsigned long flags; struct f_cdev *port = ep->driver_data; - pr_debug("ep:(%p)(%s) port:%p req_stats:%d\n", + pr_debug("ep:(%pK)(%s) port:%p req_stats:%d\n", ep, ep->name, port, req->status); if (!port) { @@ -975,7 +975,7 @@ static void usb_cser_start_io(struct f_cdev *port) int ret = -ENODEV; unsigned long flags; - pr_debug("port: %p\n", port); + pr_debug("port: %pK\n", port); spin_lock_irqsave(&port->port_lock, flags); if (!port->is_connected) @@ -1018,7 +1018,7 @@ static void usb_cser_stop_io(struct f_cdev *port) struct usb_ep *out; unsigned long flags; - pr_debug("port:%p\n", port); + pr_debug("port:%pK\n", port); in = port->port_usb.in; out = port->port_usb.out; @@ -1061,7 +1061,7 @@ int f_cdev_open(struct inode *inode, struct file *file) } file->private_data = port; - pr_debug("opening port(%s)(%p)\n", port->name, port); + pr_debug("opening port(%s)(%pK)\n", port->name, port); ret = wait_event_interruptible(port->open_wq, port->is_connected); if (ret) { @@ -1074,7 +1074,7 @@ int f_cdev_open(struct inode *inode, struct file *file) spin_unlock_irqrestore(&port->port_lock, flags); usb_cser_start_rx(port); - 
pr_debug("port(%s)(%p) open is success\n", port->name, port); + pr_debug("port(%s)(%pK) open is success\n", port->name, port); return 0; } @@ -1094,7 +1094,7 @@ int f_cdev_release(struct inode *inode, struct file *file) port->port_open = false; port->cbits_updated = false; spin_unlock_irqrestore(&port->port_lock, flags); - pr_debug("port(%s)(%p) is closed.\n", port->name, port); + pr_debug("port(%s)(%pK) is closed.\n", port->name, port); return 0; } @@ -1118,7 +1118,7 @@ ssize_t f_cdev_read(struct file *file, return -EINVAL; } - pr_debug("read on port(%s)(%p) count:%zu\n", port->name, port, count); + pr_debug("read on port(%s)(%pK) count:%zu\n", port->name, port, count); spin_lock_irqsave(&port->port_lock, flags); current_rx_req = port->current_rx_req; pending_rx_bytes = port->pending_rx_bytes; @@ -1219,7 +1219,7 @@ ssize_t f_cdev_write(struct file *file, } spin_lock_irqsave(&port->port_lock, flags); - pr_debug("write on port(%s)(%p)\n", port->name, port); + pr_debug("write on port(%s)(%pK)\n", port->name, port); if (!port->is_connected) { spin_unlock_irqrestore(&port->port_lock, flags); @@ -1389,7 +1389,7 @@ static long f_cdev_ioctl(struct file *fp, unsigned int cmd, case TIOCMBIC: case TIOCMBIS: case TIOCMSET: - pr_debug("TIOCMSET on port(%s)%p\n", port->name, port); + pr_debug("TIOCMSET on port(%s)%pK\n", port->name, port); i = get_user(val, (uint32_t *)arg); if (i) { pr_err("Error getting TIOCMSET value\n"); @@ -1398,7 +1398,7 @@ static long f_cdev_ioctl(struct file *fp, unsigned int cmd, ret = f_cdev_tiocmset(port, val, ~val); break; case TIOCMGET: - pr_debug("TIOCMGET on port(%s)%p\n", port->name, port); + pr_debug("TIOCMGET on port(%s)%pK\n", port->name, port); ret = f_cdev_tiocmget(port); if (ret >= 0) { ret = put_user(ret, (uint32_t *)arg); @@ -1448,14 +1448,14 @@ int usb_cser_connect(struct f_cdev *port) return -ENODEV; } - pr_debug("port(%s) (%p)\n", port->name, port); + pr_debug("port(%s) (%pK)\n", port->name, port); cser = &port->port_usb; 
cser->notify_modem = usb_cser_notify_modem; ret = usb_ep_enable(cser->in); if (ret) { - pr_err("usb_ep_enable failed eptype:IN ep:%p, err:%d", + pr_err("usb_ep_enable failed eptype:IN ep:%pK, err:%d", cser->in, ret); return ret; } @@ -1463,7 +1463,7 @@ int usb_cser_connect(struct f_cdev *port) ret = usb_ep_enable(cser->out); if (ret) { - pr_err("usb_ep_enable failed eptype:OUT ep:%p, err: %d", + pr_err("usb_ep_enable failed eptype:OUT ep:%pK, err: %d", cser->out, ret); cser->in->driver_data = 0; return ret; @@ -1575,7 +1575,7 @@ static struct f_cdev *f_cdev_alloc(char *func_name, int portno) goto err_create_dev; } - pr_info("port_name:%s (%p) portno:(%d)\n", + pr_info("port_name:%s (%pK) portno:(%d)\n", port->name, port, port->port_num); return port; diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c index 02de6c31739acab7d3050c14e02835c9d431a986..9f5ab86d5d612c039a9ae244f980d60062f19f3f 100644 --- a/drivers/usb/gadget/function/f_diag.c +++ b/drivers/usb/gadget/function/f_diag.c @@ -259,7 +259,7 @@ static void diag_update_pid_and_serial_num(struct diag_context *ctxt) } update_dload: - pr_debug("%s: dload:%p pid:%x serial_num:%s\n", + pr_debug("%s: dload:%pK pid:%x serial_num:%s\n", __func__, diag_dload, local_diag_dload.pid, local_diag_dload.serial_number); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a770d23ac301a12a7795e190196d4e983e47d47e..d25bb0cebd44789defe8e40c9b2ab601cc7a2801 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -141,6 +141,7 @@ struct ffs_epfile { struct ffs_data *ffs; struct ffs_ep *ep; /* P: ffs->eps_lock */ + atomic_t opened; struct dentry *dentry; @@ -207,7 +208,7 @@ struct ffs_epfile { unsigned char in; /* P: ffs->eps_lock */ unsigned char isoc; /* P: ffs->eps_lock */ - unsigned char _pad; + bool invalid; }; struct ffs_buffer { @@ -478,7 +479,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user 
*buf, break; } - ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret, + ffs_log("exit:ret %zd state %d setup_state %d flags %lu", ret, ffs->state, ffs->setup_state, ffs->flags); mutex_unlock(&ffs->mutex); @@ -810,7 +811,7 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->req->actual; bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; - ffs_log("enter: ret %d", ret); + ffs_log("enter: ret %d for %s", ret, io_data->read ? "read" : "write"); if (io_data->read && ret > 0) { mm_segment_t oldfs = get_fs(); @@ -849,8 +850,6 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, INIT_WORK(&io_data->work, ffs_user_copy_worker); queue_work(ffs->io_completion_wq, &io_data->work); - - ffs_log("exit"); } static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) @@ -944,7 +943,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ssize_t ret, data_len = -EINVAL; int halt; - ffs_log("enter: epfile name %s", epfile->name); + ffs_log("enter: %s", epfile->name); /* Are we still active? */ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) @@ -956,6 +955,16 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) if (file->f_flags & O_NONBLOCK) return -EAGAIN; + /* + * epfile->invalid is set when EPs are disabled. Userspace + * might have stale threads continuing to do I/O and may be + * unaware of that especially if we block here. Instead return + * an error immediately here and don't allow any more I/O + * until the epfile is reopened. 
+ */ + if (epfile->invalid) return -ENODEV; + ret = wait_event_interruptible( epfile->ffs->wait, (ep = epfile->ep)); if (ret) @@ -1063,6 +1072,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); + ffs_log("queued %zd bytes on %s", data_len, epfile->name); + if (unlikely(wait_for_completion_interruptible(&done))) { /* * To avoid race condition with ffs_epfile_io_complete, @@ -1102,6 +1113,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) goto error_lock; } + ffs_log("queued %zd bytes on %s", data_len, epfile->name); + ret = -EIOCBQUEUED; /* * Do not kfree the buffer in this function. It will be freed @@ -1117,7 +1130,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) error: kfree(data); - ffs_log("exit: ret %zu", ret); + ffs_log("exit: %s ret %zd", epfile->name, ret); return ret; } @@ -1129,17 +1142,16 @@ ffs_epfile_open(struct inode *inode, struct file *file) ENTER(); - ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, - epfile->ffs->setup_state, epfile->ffs->flags); + ffs_log("%s: state %d setup_state %d flag %lu opened %u", + epfile->name, epfile->ffs->state, epfile->ffs->setup_state, + epfile->ffs->flags, atomic_read(&epfile->opened)); if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) return -ENODEV; file->private_data = epfile; ffs_data_opened(epfile->ffs); - - ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state, - epfile->ffs->setup_state, epfile->ffs->flags); + atomic_inc(&epfile->opened); return 0; } @@ -1205,7 +1217,7 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) else *from = p->data; - ffs_log("exit"); + ffs_log("exit: ret %zd", res); return res; } @@ -1258,7 +1270,7 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) *to = p->data; } - ffs_log("enter"); + ffs_log("exit: ret %zd", res); return res; } @@ -1271,12 +1283,14 @@
ffs_epfile_release(struct inode *inode, struct file *file) ENTER(); __ffs_epfile_read_buffer_free(epfile); - ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, - epfile->ffs->setup_state, epfile->ffs->flags); + ffs_log("%s: state %d setup_state %d flag %lu opened %u", + epfile->name, epfile->ffs->state, epfile->ffs->setup_state, + epfile->ffs->flags, atomic_read(&epfile->opened)); - ffs_data_closed(epfile->ffs); + if (atomic_dec_and_test(&epfile->opened)) + epfile->invalid = false; - ffs_log("exit"); + ffs_data_closed(epfile->ffs); return 0; } @@ -1290,7 +1304,8 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, ENTER(); - ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, + ffs_log("%s: code 0x%08x value %#lx state %d setup_state %d flag %lu", + epfile->name, code, value, epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) @@ -1302,6 +1317,10 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, if (file->f_flags & O_NONBLOCK) return -EAGAIN; + /* don't allow any I/O until file is reopened */ + if (epfile->invalid) + return -ENODEV; + ret = wait_event_interruptible( epfile->ffs->wait, (ep = epfile->ep)); if (ret) @@ -1358,7 +1377,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, } spin_unlock_irq(&epfile->ffs->eps_lock); - ffs_log("exit:ret %d", ret); + ffs_log("exit: %s: ret %d\n", epfile->name, ret); return ret; } @@ -1412,8 +1431,6 @@ ffs_sb_make_inode(struct super_block *sb, void *data, inode->i_op = iops; } - ffs_log("exit"); - return inode; } @@ -1442,8 +1459,6 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb, d_add(dentry, inode); - ffs_log("exit"); - return dentry; } @@ -1495,8 +1510,6 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) &ffs_ep0_operations))) return -ENOMEM; - ffs_log("exit"); - return 0; } @@ -1588,8 +1601,6 @@ static int ffs_fs_parse_opts(struct 
ffs_sb_fill_data *data, char *opts) opts = comma + 1; } - ffs_log("exit"); - return 0; } @@ -1647,8 +1658,6 @@ ffs_fs_mount(struct file_system_type *t, int flags, ffs_data_put(data.ffs_data); } - ffs_log("exit"); - return rv; } @@ -1664,8 +1673,6 @@ ffs_fs_kill_sb(struct super_block *sb) ffs_release_dev(sb->s_fs_info); ffs_data_closed(sb->s_fs_info); } - - ffs_log("exit"); } static struct file_system_type ffs_fs_type = { @@ -1721,19 +1728,18 @@ static void ffs_data_get(struct ffs_data *ffs) { ENTER(); - ffs_log("enter"); + ffs_log("ref %u", refcount_read(&ffs->ref)); refcount_inc(&ffs->ref); - - ffs_log("exit"); } static void ffs_data_opened(struct ffs_data *ffs) { ENTER(); - ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state, - ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); + ffs_log("enter: state %d setup_state %d flag %lu opened %d ref %d", + ffs->state, ffs->setup_state, ffs->flags, + atomic_read(&ffs->opened), refcount_read(&ffs->ref)); refcount_inc(&ffs->ref); if (atomic_add_return(1, &ffs->opened) == 1 && @@ -1741,16 +1747,13 @@ static void ffs_data_opened(struct ffs_data *ffs) ffs->state = FFS_CLOSING; ffs_data_reset(ffs); } - - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); } static void ffs_data_put(struct ffs_data *ffs) { ENTER(); - ffs_log("enter"); + ffs_log("ref %u", refcount_read(&ffs->ref)); if (unlikely(refcount_dec_and_test(&ffs->ref))) { pr_info("%s(): freeing\n", __func__); @@ -1762,15 +1765,13 @@ static void ffs_data_put(struct ffs_data *ffs) kfree(ffs->dev_name); kfree(ffs); } - - ffs_log("exit"); } static void ffs_data_closed(struct ffs_data *ffs) { ENTER(); - ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state, + ffs_log("state %d setup_state %d flag %lu opened %d", ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); if (atomic_dec_and_test(&ffs->opened)) { @@ -1793,9 +1794,6 @@ static void ffs_data_closed(struct ffs_data 
*ffs) ffs_data_reset(ffs); } - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); - ffs_data_put(ffs); } @@ -1827,8 +1825,6 @@ static struct ffs_data *ffs_data_new(const char *dev_name) /* XXX REVISIT need to update it in some places, or do we? */ ffs->ev.can_stall = 1; - ffs_log("exit"); - return ffs; } @@ -1854,9 +1850,6 @@ static void ffs_data_clear(struct ffs_data *ffs) kfree(ffs->raw_descs_data); kfree(ffs->raw_strings); kfree(ffs->stringtabs); - - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); } static void ffs_data_reset(struct ffs_data *ffs) @@ -1888,9 +1881,6 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; - - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); } @@ -1930,9 +1920,6 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) ffs->gadget = cdev->gadget; - ffs_log("exit: state %d setup_state %d flag %lu gadget %pK\n", - ffs->state, ffs->setup_state, ffs->flags, ffs->gadget); - ffs_data_get(ffs); return 0; } @@ -1959,8 +1946,8 @@ static int ffs_epfiles_create(struct ffs_data *ffs) ENTER(); - ffs_log("enter: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); + ffs_log("enter: eps_count %u state %d setup_state %d flag %lu", + ffs->eps_count, ffs->state, ffs->setup_state, ffs->flags); count = ffs->eps_count; epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL); @@ -1982,13 +1969,12 @@ static int ffs_epfiles_create(struct ffs_data *ffs) ffs_epfiles_destroy(epfiles, i - 1); return -ENOMEM; } + + atomic_set(&epfile->opened, 0); } ffs->epfiles = epfiles; - ffs_log("exit: eps_count %u state %d setup_state %d flag %lu", - count, ffs->state, ffs->setup_state, ffs->flags); - return 0; } @@ -2010,8 +1996,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned 
count) } kfree(epfiles); - - ffs_log("exit"); } static void ffs_func_eps_disable(struct ffs_function *func) @@ -2032,14 +2016,13 @@ static void ffs_func_eps_disable(struct ffs_function *func) ++ep; if (epfile) { + epfile->invalid = true; /* until file is reopened */ epfile->ep = NULL; __ffs_epfile_read_buffer_free(epfile); ++epfile; } } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); - - ffs_log("exit"); } static int ffs_func_eps_enable(struct ffs_function *func) @@ -2072,6 +2055,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); ffs_log("usb_ep_enable %s", ep->ep->name); } else { + ffs_log("usb_ep_enable %s ret %d", ep->ep->name, ret); break; } @@ -2082,8 +2066,6 @@ static int ffs_func_eps_enable(struct ffs_function *func) wake_up_interruptible(&ffs->wait); spin_unlock_irqrestore(&func->ffs->eps_lock, flags); - ffs_log("exit: ret %d", ret); - return ret; } @@ -2284,8 +2266,6 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, data += ret; ++num; } - - ffs_log("exit: len %u", len); } static int __ffs_data_do_entity(enum ffs_entity_type type, @@ -2337,8 +2317,6 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, break; } - ffs_log("exit"); - return 0; } @@ -2348,7 +2326,7 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, u16 bcd_version = le16_to_cpu(desc->bcdVersion); u16 w_index = le16_to_cpu(desc->wIndex); - ffs_log("enter"); + ffs_log("enter: bcd:%x w_index:%d", bcd_version, w_index); if (bcd_version != 1) { pr_vdebug("unsupported os descriptors version: %d", @@ -2367,8 +2345,6 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, return -EINVAL; } - ffs_log("exit: size of desc %zu", sizeof(*desc)); - return sizeof(*desc); } @@ -2394,14 +2370,13 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len, while (feature_count--) { ret = entity(type, h, data, len, priv); if (unlikely(ret < 0)) { - 
pr_debug("bad OS descriptor, type: %d\n", type); + ffs_log("bad OS descriptor, type: %d\n", type); return ret; } data += ret; len -= ret; } - ffs_log("exit"); return _len - len; } @@ -2439,7 +2414,7 @@ static int __must_check ffs_do_os_descs(unsigned count, ret = __ffs_do_os_desc_header(&type, desc); if (unlikely(ret < 0)) { - pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n", + ffs_log("entity OS_DESCRIPTOR(%02lx); ret = %d\n", num, ret); return ret; } @@ -2460,7 +2435,7 @@ static int __must_check ffs_do_os_descs(unsigned count, ret = ffs_do_single_os_desc(data, len, type, feature_count, entity, priv, desc); if (unlikely(ret < 0)) { - pr_debug("%s returns %d\n", __func__, ret); + ffs_log("%s returns %d\n", __func__, ret); return ret; } @@ -2468,8 +2443,6 @@ static int __must_check ffs_do_os_descs(unsigned count, data += ret; } - ffs_log("exit"); - return _len - len; } @@ -2485,7 +2458,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, ENTER(); - ffs_log("enter: len %u", len); + ffs_log("enter: type %d len %u", type, len); switch (type) { case FFS_OS_DESC_EXT_COMPAT: { @@ -2552,8 +2525,6 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, return -EINVAL; } - ffs_log("exit"); - return length; } @@ -2684,13 +2655,10 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, ffs->ss_descs_count = counts[2]; ffs->ms_os_descs_count = os_descs_count; - ffs_log("exit"); - return 0; error: kfree(_data); - ffs_log("exit: ret %d", ret); return ret; } @@ -2820,14 +2788,12 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, ffs->stringtabs = stringtabs; ffs->raw_strings = _data; - ffs_log("exit"); return 0; error_free: kfree(stringtabs); error: kfree(_data); - ffs_log("exit: -EINVAL"); return -EINVAL; } @@ -2902,9 +2868,6 @@ static void __ffs_event_add(struct ffs_data *ffs, wake_up_locked(&ffs->ev.waitq); if (ffs->ffs_eventfd) eventfd_signal(ffs->ffs_eventfd, 1); - - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - 
ffs->setup_state, ffs->flags); } static void ffs_event_add(struct ffs_data *ffs, @@ -3016,8 +2979,6 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, } ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); - ffs_log("exit"); - return 0; } @@ -3159,8 +3120,6 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type, pr_vdebug("unknown descriptor: %d\n", type); } - ffs_log("exit"); - return length; } @@ -3204,14 +3163,14 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f, */ if (!ffs_opts->refcnt) { ret = functionfs_bind(func->ffs, c->cdev); - if (ret) + if (ret) { + ffs_log("functionfs_bind returned %d", ret); return ERR_PTR(ret); + } } ffs_opts->refcnt++; func->function.strings = func->ffs->stringtabs; - ffs_log("exit"); - return ffs_opts; } @@ -3376,9 +3335,6 @@ static int _ffs_func_bind(struct usb_configuration *c, /* And we're done */ ffs_event_add(ffs, FUNCTIONFS_BIND); - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); - return 0; error: @@ -3403,8 +3359,6 @@ static int ffs_func_bind(struct usb_configuration *c, if (ret && !--ffs_opts->refcnt) functionfs_unbind(func->ffs); - ffs_log("exit: ret %d", ret); - return ret; } @@ -3419,8 +3373,6 @@ static void ffs_reset_work(struct work_struct *work) ffs_log("enter"); ffs_data_reset(ffs); - - ffs_log("exit"); } static int ffs_func_set_alt(struct usb_function *f, @@ -3430,7 +3382,7 @@ static int ffs_func_set_alt(struct usb_function *f, struct ffs_data *ffs = func->ffs; int ret = 0, intf; - ffs_log("enter"); + ffs_log("enter: alt %d", (int)alt); if (alt != (unsigned)-1) { intf = ffs_func_revmap_intf(func, interface); @@ -3441,6 +3393,8 @@ static int ffs_func_set_alt(struct usb_function *f, if (ffs->func) { ffs_func_eps_disable(ffs->func); ffs->func = NULL; + /* matching put to allow LPM on disconnect */ + usb_gadget_autopm_put_async(ffs->gadget); } if (ffs->state == FFS_DEACTIVATED) { @@ -3466,21 +3420,14 
@@ static int ffs_func_set_alt(struct usb_function *f, /* Disable USB LPM later on bus_suspend */ usb_gadget_autopm_get_async(ffs->gadget); } - ffs_log("exit: ret %d", ret); return ret; } static void ffs_func_disable(struct usb_function *f) { - struct ffs_function *func = ffs_func_from_usb(f); - struct ffs_data *ffs = func->ffs; - ffs_log("enter"); ffs_func_set_alt(f, 0, (unsigned)-1); - /* matching put to allow LPM on disconnect */ - usb_gadget_autopm_put_async(ffs->gadget); - ffs_log("exit"); } static int ffs_func_setup(struct usb_function *f, @@ -3493,14 +3440,17 @@ static int ffs_func_setup(struct usb_function *f, ENTER(); - ffs_log("enter"); - pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType); pr_vdebug("creq->bRequest = %02x\n", creq->bRequest); pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue)); pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex)); pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength)); + ffs_log("enter: state %d reqtype=%02x req=%02x wv=%04x wi=%04x wl=%04x", + ffs->state, creq->bRequestType, creq->bRequest, + le16_to_cpu(creq->wValue), le16_to_cpu(creq->wIndex), + le16_to_cpu(creq->wLength)); + /* * Most requests directed to interface go through here * (notable exceptions are set/get interface) so we need to @@ -3542,8 +3492,6 @@ static int ffs_func_setup(struct usb_function *f, __ffs_event_add(ffs, FUNCTIONFS_SETUP); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); - ffs_log("exit"); - return creq->wLength == 0 ? 
USB_GADGET_DELAYED_STATUS : 0; } @@ -3581,8 +3529,6 @@ static void ffs_func_suspend(struct usb_function *f) ffs_log("enter"); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); - - ffs_log("exit"); } static void ffs_func_resume(struct usb_function *f) @@ -3592,8 +3538,6 @@ static void ffs_func_resume(struct usb_function *f) ffs_log("enter"); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); - - ffs_log("exit"); } @@ -3610,15 +3554,11 @@ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) short *nums = func->interfaces_nums; unsigned count = func->ffs->interfaces_count; - ffs_log("enter"); - for (; count; --count, ++nums) { if (*nums >= 0 && *nums == intf) return nums - func->interfaces_nums; } - ffs_log("exit"); - return -EDOM; } @@ -3641,8 +3581,6 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name) return dev; } - ffs_log("exit"); - return NULL; } @@ -3661,8 +3599,6 @@ static struct ffs_dev *_ffs_get_single_dev(void) return dev; } - ffs_log("exit"); - return NULL; } @@ -3681,8 +3617,6 @@ static struct ffs_dev *_ffs_find_dev(const char *name) dev = _ffs_do_find_dev(name); - ffs_log("exit"); - return dev; } @@ -3883,8 +3817,6 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name) ffs_dev_unlock(); - ffs_log("exit"); - return ret; } EXPORT_SYMBOL_GPL(ffs_name_dev); @@ -3905,8 +3837,6 @@ int ffs_single_dev(struct ffs_dev *dev) ffs_dev_unlock(); - ffs_log("exit"); - return ret; } EXPORT_SYMBOL_GPL(ffs_single_dev); @@ -3928,8 +3858,6 @@ static void _ffs_free_dev(struct ffs_dev *dev) kfree(dev); if (list_empty(&ffs_devices)) functionfs_cleanup(); - - ffs_log("exit"); } static void *ffs_acquire_dev(const char *dev_name) @@ -3955,8 +3883,6 @@ static void *ffs_acquire_dev(const char *dev_name) ffs_dev_unlock(); - ffs_log("exit"); - return ffs_dev; } @@ -3979,8 +3905,6 @@ static void ffs_release_dev(struct ffs_data *ffs_data) } ffs_dev_unlock(); - - ffs_log("exit"); } static int ffs_ready(struct ffs_data *ffs) @@ -4017,7 
+3941,7 @@ static int ffs_ready(struct ffs_data *ffs) done: ffs_dev_unlock(); - ffs_log("exit"); + ffs_log("exit: ret %d", ret); return ret; } @@ -4062,7 +3986,6 @@ static void ffs_closed(struct ffs_data *ffs) ffs_log("unreg gadget done"); } - ffs_log("exit"); return; done: ffs_dev_unlock(); diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index f2d922dacbd4339fdfb823c5ebafa53a447ff801..fb2907027d2934694858e6e55041f557dd8ca7b9 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -15,6 +15,12 @@ #include "f_gsi.h" #include "rndis.h" +struct usb_gsi_debugfs { + struct dentry *debugfs_root; +}; + +static struct usb_gsi_debugfs debugfs; + static bool qti_packet_debug; module_param(qti_packet_debug, bool, 0644); MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data"); @@ -182,6 +188,212 @@ static int gsi_wakeup_host(struct f_gsi *gsi) return ret; } +static void gsi_rw_timer_func(unsigned long arg) +{ + struct f_gsi *gsi = (struct f_gsi *)arg; + + if (!atomic_read(&gsi->connected)) { + log_event_dbg("%s: gsi not connected.. 
bail-out\n", __func__); + gsi->debugfs_rw_timer_enable = 0; + return; + } + + log_event_dbg("%s: calling gsi_wakeup_host\n", __func__); + gsi_wakeup_host(gsi); + + if (gsi->debugfs_rw_timer_enable) { + log_event_dbg("%s: re-arm the timer\n", __func__); + mod_timer(&gsi->gsi_rw_timer, + jiffies + msecs_to_jiffies(gsi->gsi_rw_timer_interval)); + } +} + +static struct f_gsi *get_connected_gsi(void) +{ + struct f_gsi *connected_gsi; + bool gsi_connected = false; + int i; + + for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++) { + if (inst_status[i].opts) + connected_gsi = inst_status[i].opts->gsi; + else + continue; + + if (connected_gsi && atomic_read(&connected_gsi->connected)) { + gsi_connected = true; + break; + } + } + + if (!gsi_connected) + connected_gsi = NULL; + + return connected_gsi; +} + +#define DEFAULT_RW_TIMER_INTERVAL 500 /* in ms */ +static ssize_t usb_gsi_rw_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct f_gsi *gsi; + u8 input; + int ret; + + gsi = get_connected_gsi(); + if (!gsi) { + log_event_dbg("%s: gsi not connected\n", __func__); + goto err; + } + + if (ubuf == NULL) { + log_event_dbg("%s: buffer is Null.\n", __func__); + goto err; + } + + ret = kstrtou8_from_user(ubuf, count, 0, &input); + if (ret) { + log_event_err("%s: Invalid value. 
err:%d\n", __func__, ret); + goto err; + } + + if (gsi->debugfs_rw_timer_enable == !!input) { + if (!!input) + log_event_dbg("%s: RW already enabled\n", __func__); + else + log_event_dbg("%s: RW already disabled\n", __func__); + goto err; + } + + gsi->debugfs_rw_timer_enable = !!input; + + if (gsi->debugfs_rw_timer_enable) { + mod_timer(&gsi->gsi_rw_timer, jiffies + + msecs_to_jiffies(gsi->gsi_rw_timer_interval)); + log_event_dbg("%s: timer initialized\n", __func__); + } else { + del_timer_sync(&gsi->gsi_rw_timer); + log_event_dbg("%s: timer deleted\n", __func__); + } + +err: + return count; +} + +static int usb_gsi_rw_show(struct seq_file *s, void *unused) +{ + + struct f_gsi *gsi; + + gsi = get_connected_gsi(); + if (!gsi) { + log_event_dbg("%s: gsi not connected\n", __func__); + return 0; + } + + seq_printf(s, "%d\n", gsi->debugfs_rw_timer_enable); + + return 0; +} + +static int usb_gsi_rw_open(struct inode *inode, struct file *f) +{ + return single_open(f, usb_gsi_rw_show, inode->i_private); +} + +static const struct file_operations fops_usb_gsi_rw = { + .open = usb_gsi_rw_open, + .read = seq_read, + .write = usb_gsi_rw_write, + .owner = THIS_MODULE, + .llseek = seq_lseek, + .release = seq_release, +}; + +static ssize_t usb_gsi_rw_timer_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct f_gsi *gsi; + u16 timer_val; + int ret; + + gsi = get_connected_gsi(); + if (!gsi) { + log_event_dbg("%s: gsi not connected\n", __func__); + goto err; + } + + if (ubuf == NULL) { + log_event_dbg("%s: buffer is NULL.\n", __func__); + goto err; + } + + ret = kstrtou16_from_user(ubuf, count, 0, &timer_val); + if (ret) { + log_event_err("%s: Invalid value. 
err:%d\n", __func__, ret); + goto err; + } + + if (timer_val <= 0 || timer_val > 10000) { + log_event_err("%s: value must be > 0 and < 10000.\n", __func__); + goto err; + } + + gsi->gsi_rw_timer_interval = timer_val; +err: + return count; +} + +static int usb_gsi_rw_timer_show(struct seq_file *s, void *unused) +{ + struct f_gsi *gsi; + + gsi = get_connected_gsi(); + if (!gsi) { + log_event_dbg("%s: gsi not connected\n", __func__); + return 0; + } + + seq_printf(s, "%ums\n", gsi->gsi_rw_timer_interval); + + return 0; +} + +static int usb_gsi_rw_timer_open(struct inode *inode, struct file *f) +{ + return single_open(f, usb_gsi_rw_timer_show, inode->i_private); +} + +static const struct file_operations fops_usb_gsi_rw_timer = { + .open = usb_gsi_rw_timer_open, + .read = seq_read, + .write = usb_gsi_rw_timer_write, + .owner = THIS_MODULE, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int usb_gsi_debugfs_init(void) +{ + debugfs.debugfs_root = debugfs_create_dir("usb_gsi", NULL); + if (!debugfs.debugfs_root) + return -ENOMEM; + + debugfs_create_file("remote_wakeup_enable", 0600, + debugfs.debugfs_root, + inst_status, &fops_usb_gsi_rw); + debugfs_create_file("remote_wakeup_interval", 0600, + debugfs.debugfs_root, + inst_status, + &fops_usb_gsi_rw_timer); + return 0; +} + +static void usb_gsi_debugfs_exit(void) +{ + debugfs_remove_recursive(debugfs.debugfs_root); +} + /* * Callback for when when network interface is up * and userspace is ready to answer DHCP requests, or remote wakeup @@ -374,7 +586,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port) /* Populate connection params */ conn_params->max_pkt_size = - (cdev->gadget->speed == USB_SPEED_SUPER) ? + (cdev->gadget->speed >= USB_SPEED_SUPER) ? 
IPA_USB_SUPER_SPEED_1024B : IPA_USB_HIGH_SPEED_512B; conn_params->ipa_to_usb_xferrscidx = d_port->in_xfer_rsc_index; @@ -938,7 +1150,7 @@ static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp) struct gsi_inst_status *inst_cur; if (!c_port) { - pr_err_ratelimited("%s: gsi ctrl port %p", __func__, c_port); + pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port); return -ENODEV; } @@ -1027,7 +1239,7 @@ gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) gsi = inst_cur->opts->gsi; c_port = &inst_cur->opts->gsi->c_port; if (!c_port) { - log_event_err("%s: gsi ctrl port %p", __func__, c_port); + log_event_err("%s: gsi ctrl port %pK", __func__, c_port); return -ENODEV; } @@ -1116,7 +1328,7 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf, req = c_port->notify_req; if (!c_port || !req || !req->buf) { - log_event_err("%s: c_port %p req %p req->buf %p", + log_event_err("%s: c_port %pK req %p req->buf %p", __func__, c_port, req, req ? 
req->buf : req); return -ENODEV; } @@ -1195,7 +1407,7 @@ static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd, c_port = &gsi->c_port; if (!c_port) { - log_event_err("%s: gsi ctrl port %p", __func__, c_port); + log_event_err("%s: gsi ctrl port %pK", __func__, c_port); return -ENODEV; } @@ -1366,7 +1578,7 @@ static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait) gsi = inst_cur->opts->gsi; c_port = &inst_cur->opts->gsi->c_port; if (!c_port) { - log_event_err("%s: gsi ctrl port %p", __func__, c_port); + log_event_err("%s: gsi ctrl port %pK", __func__, c_port); return -ENODEV; } @@ -1516,7 +1728,7 @@ void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param) struct gsi_data_port *d_port; if (!gsi) { - pr_err("%s: gsi prot ctx is %p", __func__, gsi); + pr_err("%s: gsi prot ctx is %pK", __func__, gsi); return; } @@ -1739,7 +1951,7 @@ gsi_ctrl_set_ntb_cmd_complete(struct usb_ep *ep, struct usb_request *req) struct f_gsi *gsi = req->context; struct gsi_ntb_info *ntb = NULL; - log_event_dbg("dev:%p", gsi); + log_event_dbg("dev:%pK", gsi); req->context = NULL; if (req->status || req->actual != req->length) { @@ -2258,6 +2470,9 @@ static void gsi_disable(struct usb_function *f) atomic_set(&gsi->connected, 0); + del_timer(&gsi->gsi_rw_timer); + gsi->debugfs_rw_timer_enable = 0; + if (gsi->prot_id == IPA_USB_RNDIS) rndis_uninit(gsi->params); @@ -2306,6 +2521,16 @@ static void gsi_suspend(struct usb_function *f) post_event(&gsi->d_port, EVT_SUSPEND); queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w); log_event_dbg("gsi suspended"); + + /* + * If host suspended bus without receiving notification request then + * initiate remote-wakeup. As driver won't be able to do it later since + * notification request is already queued. 
+ */ + if (gsi->c_port.notify_req_queued && usb_gsi_remote_wakeup_allowed(f)) { + mod_timer(&gsi->gsi_rw_timer, jiffies + msecs_to_jiffies(2000)); + log_event_dbg("%s: pending response, arm rw_timer\n", __func__); + } } static void gsi_resume(struct usb_function *f) @@ -2323,6 +2548,10 @@ static void gsi_resume(struct usb_function *f) f->func_is_suspended) return; + /* Keep timer enabled if user enabled using debugfs */ + if (!gsi->debugfs_rw_timer_enable) + del_timer(&gsi->gsi_rw_timer); + if (gsi->c_port.notify && !gsi->c_port.notify->desc) config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify); @@ -2347,6 +2576,11 @@ static void gsi_resume(struct usb_function *f) static int gsi_get_status(struct usb_function *f) { unsigned int remote_wakeup_en_status = f->func_wakeup_allowed ? 1 : 0; + struct f_gsi *gsi = func_to_gsi(f); + + /* Disable function remote wake-up for DPL interface */ + if (gsi->prot_id == IPA_USB_DIAG) + return 0; return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) | (1 << FUNC_WAKEUP_CAPABLE_SHIFT); @@ -3054,6 +3288,8 @@ static struct f_gsi *gsi_function_init(enum ipa_usb_teth_prot prot_id) kfree(gsi); goto error; } + gsi->gsi_rw_timer_interval = DEFAULT_RW_TIMER_INTERVAL; + setup_timer(&gsi->gsi_rw_timer, gsi_rw_timer_func, (unsigned long) gsi); return gsi; error: @@ -3443,6 +3679,7 @@ static int fgsi_init(void) major = MAJOR(dev); + usb_gsi_debugfs_init(); return usb_function_register(&gsiusb_func); } module_init(fgsi_init); @@ -3459,5 +3696,6 @@ static void __exit fgsi_exit(void) } class_destroy(gsi_class); + usb_gsi_debugfs_exit(); } module_exit(fgsi_exit); diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 06b1bb4845314783d71947e6d08c8997d8dbd0fa..db8c4925657a9b0266469f7edc64a332d8953a2a 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -26,6 +26,7 @@ #include #include #include +#include #define GSI_RMNET_CTRL_NAME "rmnet_ctrl" #define 
GSI_MBIM_CTRL_NAME "android_mbim" @@ -280,6 +281,12 @@ struct f_gsi { struct gsi_ctrl_port c_port; void *ipc_log_ctxt; bool rmnet_dtr_status; + + /* To test remote wakeup using debugfs */ + struct timer_list gsi_rw_timer; + u8 debugfs_rw_timer_enable; + u16 gsi_rw_timer_interval; + }; static inline struct f_gsi *func_to_gsi(struct usb_function *f) diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index d050dd56d67f00f33844fc9cd9e12623b91405d8..8e1f9455ed7725a15afe29b253fdc00cd00747be 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -26,6 +26,8 @@ #include #include +#include +#include #include #include #include @@ -40,6 +42,8 @@ #include "configfs.h" +#define MTP_RX_BUFFER_INIT_SIZE 1048576 +#define MTP_TX_BUFFER_INIT_SIZE 1048576 #define MTP_BULK_BUFFER_SIZE 16384 #define INTR_BUFFER_SIZE 28 #define MAX_INST_NAME_LEN 40 @@ -56,7 +60,7 @@ #define STATE_ERROR 4 /* error from completion routine */ /* number of tx and rx requests to allocate */ -#define TX_REQ_MAX 4 +#define MTP_TX_REQ_MAX 8 #define RX_REQ_MAX 2 #define INTR_REQ_MAX 5 @@ -74,6 +78,17 @@ #define MTP_RESPONSE_DEVICE_BUSY 0x2019 #define DRIVER_NAME "mtp" +#define MAX_ITERATION 100 + +unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE; +module_param(mtp_rx_req_len, uint, 0644); + +unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE; +module_param(mtp_tx_req_len, uint, 0644); + +unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX; +module_param(mtp_tx_reqs, uint, 0644); + static const char mtp_shortname[] = DRIVER_NAME "_usb"; struct mtp_dev { @@ -114,6 +129,14 @@ struct mtp_dev { uint16_t xfer_command; uint32_t xfer_transaction_id; int xfer_result; + struct { + unsigned long vfs_rbytes; + unsigned long vfs_wbytes; + unsigned int vfs_rtime; + unsigned int vfs_wtime; + } perf[MAX_ITERATION]; + unsigned int dbg_read_index; + unsigned int dbg_write_index; }; static struct usb_interface_descriptor mtp_interface_desc = { @@ -434,7 
+457,7 @@ static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; - if (req->status != 0) + if (req->status != 0 && dev->state != STATE_OFFLINE) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->tx_idle, req); @@ -447,7 +470,7 @@ static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) struct mtp_dev *dev = _mtp_dev; dev->rx_done = 1; - if (req->status != 0) + if (req->status != 0 && dev->state != STATE_OFFLINE) dev->state = STATE_ERROR; wake_up(&dev->read_wq); @@ -457,7 +480,7 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; - if (req->status != 0) + if (req->status != 0 && dev->state != STATE_OFFLINE) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->intr_idle, req); @@ -475,7 +498,7 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev, struct usb_ep *ep; int i; - DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev); ep = usb_ep_autoconfig(cdev->gadget, in_desc); if (!ep) { @@ -504,18 +527,43 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev, ep->driver_data = dev; /* claim the endpoint */ dev->ep_intr = ep; +retry_tx_alloc: /* now allocate requests for our endpoints */ - for (i = 0; i < TX_REQ_MAX; i++) { - req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE); - if (!req) - goto fail; + for (i = 0; i < mtp_tx_reqs; i++) { + req = mtp_request_new(dev->ep_in, mtp_tx_req_len); + if (!req) { + if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE) + goto fail; + while ((req = mtp_req_get(dev, &dev->tx_idle))) + mtp_request_free(req, dev->ep_in); + mtp_tx_req_len = MTP_BULK_BUFFER_SIZE; + mtp_tx_reqs = MTP_TX_REQ_MAX; + goto retry_tx_alloc; + } req->complete = mtp_complete_in; mtp_req_put(dev, &dev->tx_idle, req); } + + /* + * The RX buffer should be aligned to EP max packet for + * some controllers. At bind time, we don't know the + * operational speed. 
Hence assuming super speed max + * packet size. + */ + if (mtp_rx_req_len % 1024) + mtp_rx_req_len = MTP_BULK_BUFFER_SIZE; + +retry_rx_alloc: for (i = 0; i < RX_REQ_MAX; i++) { - req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE); - if (!req) - goto fail; + req = mtp_request_new(dev->ep_out, mtp_rx_req_len); + if (!req) { + if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE) + goto fail; + for (--i; i >= 0; i--) + mtp_request_free(dev->rx_req[i], dev->ep_out); + mtp_rx_req_len = MTP_BULK_BUFFER_SIZE; + goto retry_rx_alloc; + } req->complete = mtp_complete_out; dev->rx_req[i] = req; } @@ -540,12 +588,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; - ssize_t r = count; - unsigned xfer; + ssize_t r = count, xfer, len; int ret = 0; - size_t len = 0; - DBG(cdev, "mtp_read(%zu)\n", count); + DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state); /* we will block until we're online */ DBG(cdev, "mtp_read: waiting for online state\n"); @@ -555,6 +601,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, r = ret; goto done; } + + len = ALIGN(count, dev->ep_out->maxpacket); + if (len > mtp_rx_req_len) + return -EINVAL; + spin_lock_irq(&dev->lock); if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); @@ -593,11 +644,21 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, r = -EIO; goto done; } else { - DBG(cdev, "rx %p queue\n", req); + DBG(cdev, "rx %pK queue\n", req); } /* wait for a request to complete */ - ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + if (!dev->rx_done) + usb_ep_dequeue(dev->ep_out, req); + spin_lock_irq(&dev->lock); + dev->state = STATE_CANCELED; + spin_unlock_irq(&dev->lock); + goto done; + } if (ret < 0) { r = ret; 
usb_ep_dequeue(dev->ep_out, req); @@ -608,7 +669,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, if (req->actual == 0) goto requeue_req; - DBG(cdev, "rx %p %d\n", req, req->actual); + DBG(cdev, "rx %pK %d\n", req, req->actual); xfer = (req->actual < count) ? req->actual : count; r = xfer; if (copy_to_user(buf, req->buf, xfer)) @@ -624,7 +685,7 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, dev->state = STATE_READY; spin_unlock_irq(&dev->lock); - DBG(cdev, "mtp_read returning %zd\n", r); + DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state); return r; } @@ -639,7 +700,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf, int sendZLP = 0; int ret; - DBG(cdev, "mtp_write(%zu)\n", count); + DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { @@ -678,12 +739,14 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf, ((req = mtp_req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY)); if (!req) { + DBG(cdev, "%s request NULL ret:%d state:%d\n", + __func__, ret, dev->state); r = ret; break; } - if (count > MTP_BULK_BUFFER_SIZE) - xfer = MTP_BULK_BUFFER_SIZE; + if (count > mtp_tx_req_len) + xfer = mtp_tx_req_len; else xfer = count; if (xfer && copy_from_user(req->buf, buf, xfer)) { @@ -716,7 +779,7 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf, dev->state = STATE_READY; spin_unlock_irq(&dev->lock); - DBG(cdev, "mtp_write returning %zd\n", r); + DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state); return r; } @@ -734,6 +797,7 @@ static void send_file_work(struct work_struct *data) int xfer, ret, hdr_size; int r = 0; int sendZLP = 0; + ktime_t start_time; /* read our parameters */ smp_rmb(); @@ -771,12 +835,15 @@ static void send_file_work(struct work_struct *data) break; } if (!req) { + DBG(cdev, + "%s request NULL ret:%d state:%d\n", __func__, + ret, dev->state); r = ret; break; } - if 
(count > MTP_BULK_BUFFER_SIZE) - xfer = MTP_BULK_BUFFER_SIZE; + if (count > mtp_tx_req_len) + xfer = mtp_tx_req_len; else xfer = count; @@ -794,21 +861,27 @@ static void send_file_work(struct work_struct *data) header->transaction_id = __cpu_to_le32(dev->xfer_transaction_id); } - + start_time = ktime_get(); ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, &offset); if (ret < 0) { r = ret; break; } + xfer = ret + hdr_size; + dev->perf[dev->dbg_read_index].vfs_rtime = + ktime_to_us(ktime_sub(ktime_get(), start_time)); + dev->perf[dev->dbg_read_index].vfs_rbytes = xfer; + dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION; hdr_size = 0; req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; r = -EIO; break; } @@ -822,7 +895,7 @@ static void send_file_work(struct work_struct *data) if (req) mtp_req_put(dev, &dev->tx_idle, req); - DBG(cdev, "send_file_work returning %d\n", r); + DBG(cdev, "%s returning %d state:%d\n", __func__, r, dev->state); /* write the result */ dev->xfer_result = r; smp_wmb(); @@ -840,6 +913,7 @@ static void receive_file_work(struct work_struct *data) int64_t count; int ret, cur_buf = 0; int r = 0; + ktime_t start_time; /* read our parameters */ smp_rmb(); @@ -848,6 +922,9 @@ static void receive_file_work(struct work_struct *data) count = dev->xfer_file_length; DBG(cdev, "receive_file_work(%lld)\n", count); + if (!IS_ALIGNED(count, dev->ep_out->maxpacket)) + DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__, + count, dev->ep_out->maxpacket); while (count > 0 || write_req) { if (count > 0) { @@ -855,27 +932,36 @@ static void receive_file_work(struct work_struct *data) read_req = dev->rx_req[cur_buf]; cur_buf = (cur_buf + 1) % RX_REQ_MAX; - read_req->length = (count > MTP_BULK_BUFFER_SIZE - ? 
MTP_BULK_BUFFER_SIZE : count); + /* some h/w expects size to be aligned to ep's MTU */ + read_req->length = mtp_rx_req_len; + dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } } if (write_req) { - DBG(cdev, "rx %p %d\n", write_req, write_req->actual); + DBG(cdev, "rx %pK %d\n", write_req, write_req->actual); + start_time = ktime_get(); ret = vfs_write(filp, write_req->buf, write_req->actual, &offset); DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } + dev->perf[dev->dbg_write_index].vfs_wtime = + ktime_to_us(ktime_sub(ktime_get(), start_time)); + dev->perf[dev->dbg_write_index].vfs_wbytes = ret; + dev->dbg_write_index = + (dev->dbg_write_index + 1) % MAX_ITERATION; write_req = NULL; } @@ -883,8 +969,12 @@ static void receive_file_work(struct work_struct *data) /* wait for our last read to complete */ ret = wait_event_interruptible(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY); - if (dev->state == STATE_CANCELED) { - r = -ECANCELED; + if (dev->state == STATE_CANCELED + || dev->state == STATE_OFFLINE) { + if (dev->state == STATE_OFFLINE) + r = -EIO; + else + r = -ECANCELED; if (!dev->rx_done) usb_ep_dequeue(dev->ep_out, read_req); break; @@ -893,6 +983,11 @@ static void receive_file_work(struct work_struct *data) r = read_req->status; break; } + + /* Check if we aligned the size due to MTU constraint */ + if (count < read_req->length) + read_req->actual = (read_req->actual > count ? 
+ count : read_req->actual); /* if xfer_file_length is 0xFFFFFFFF, then we read until * we get a zero length packet */ @@ -949,85 +1044,107 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) return ret; } -static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) +static long mtp_send_receive_ioctl(struct file *fp, unsigned int code, + struct mtp_file_range *mfr) { struct mtp_dev *dev = fp->private_data; struct file *filp = NULL; + struct work_struct *work; int ret = -EINVAL; - if (mtp_lock(&dev->ioctl_excl)) + if (mtp_lock(&dev->ioctl_excl)) { + DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state); return -EBUSY; + } + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancellation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + ret = -ECANCELED; + goto out; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + ret = -ENODEV; + goto out; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + /* hold a reference to the file while we are working with it */ + filp = fget(mfr->fd); + if (!filp) { + ret = -EBADF; + goto fail; + } + + /* write the parameters */ + dev->xfer_file = filp; + dev->xfer_file_offset = mfr->offset; + dev->xfer_file_length = mfr->length; + /* make sure write is done before parameters are read */ + smp_wmb(); + + if (code == MTP_SEND_FILE_WITH_HEADER) { + work = &dev->send_file_work; + dev->xfer_send_header = 1; + dev->xfer_command = mfr->command; + dev->xfer_transaction_id = mfr->transaction_id; + } else if (code == MTP_SEND_FILE) { + work = &dev->send_file_work; + dev->xfer_send_header = 0; + } else { + work = &dev->receive_file_work; + } + + /* We do the file transfer on a work queue so it will run + * in kernel context, which is necessary for vfs_read and + * vfs_write to use our buffers in the kernel address space. 
+ */ + queue_work(dev->wq, work); + /* wait for operation to complete */ + flush_workqueue(dev->wq); + fput(filp); + + /* read the result */ + smp_rmb(); + ret = dev->xfer_result; + +fail: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + ret = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); +out: + mtp_unlock(&dev->ioctl_excl); + DBG(dev->cdev, "ioctl returning %d\n", ret); + return ret; +} + +static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value) +{ + struct mtp_dev *dev = fp->private_data; + struct mtp_file_range mfr; + struct mtp_event event; + int ret = -EINVAL; switch (code) { case MTP_SEND_FILE: case MTP_RECEIVE_FILE: case MTP_SEND_FILE_WITH_HEADER: - { - struct mtp_file_range mfr; - struct work_struct *work; - - spin_lock_irq(&dev->lock); - if (dev->state == STATE_CANCELED) { - /* report cancelation to userspace */ - dev->state = STATE_READY; - spin_unlock_irq(&dev->lock); - ret = -ECANCELED; - goto out; - } - if (dev->state == STATE_OFFLINE) { - spin_unlock_irq(&dev->lock); - ret = -ENODEV; - goto out; - } - dev->state = STATE_BUSY; - spin_unlock_irq(&dev->lock); - if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { ret = -EFAULT; goto fail; } - /* hold a reference to the file while we are working with it */ - filp = fget(mfr.fd); - if (!filp) { - ret = -EBADF; - goto fail; - } - - /* write the parameters */ - dev->xfer_file = filp; - dev->xfer_file_offset = mfr.offset; - dev->xfer_file_length = mfr.length; - smp_wmb(); - - if (code == MTP_SEND_FILE_WITH_HEADER) { - work = &dev->send_file_work; - dev->xfer_send_header = 1; - dev->xfer_command = mfr.command; - dev->xfer_transaction_id = mfr.transaction_id; - } else if (code == MTP_SEND_FILE) { - work = &dev->send_file_work; - dev->xfer_send_header = 0; - } else { - work = &dev->receive_file_work; - } - - /* We do the file transfer on a work queue so it will run - * in kernel context, which is 
necessary for vfs_read and - * vfs_write to use our buffers in the kernel address space. - */ - queue_work(dev->wq, work); - /* wait for operation to complete */ - flush_workqueue(dev->wq); - fput(filp); - - /* read the result */ - smp_rmb(); - ret = dev->xfer_result; - break; - } + ret = mtp_send_receive_ioctl(fp, code, &mfr); + break; case MTP_SEND_EVENT: - { - struct mtp_event event; + if (mtp_lock(&dev->ioctl_excl)) + return -EBUSY; /* return here so we don't change dev->state below, * which would interfere with bulk transfer state. */ @@ -1035,28 +1152,93 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) ret = -EFAULT; else ret = mtp_send_event(dev, &event); - goto out; + mtp_unlock(&dev->ioctl_excl); + break; + default: + DBG(dev->cdev, "unknown ioctl code: %d\n", code); } +fail: + return ret; +} + +/* + * 32 bit userspace calling into 64 bit kernel. handle ioctl code + * and userspace pointer + */ +#ifdef CONFIG_COMPAT +static long compat_mtp_ioctl(struct file *fp, unsigned int code, + unsigned long value) +{ + struct mtp_dev *dev = fp->private_data; + struct mtp_file_range mfr; + struct __compat_mtp_file_range cmfr; + struct mtp_event event; + struct __compat_mtp_event cevent; + unsigned int cmd; + bool send_file = false; + int ret = -EINVAL; + + switch (code) { + case COMPAT_MTP_SEND_FILE: + cmd = MTP_SEND_FILE; + send_file = true; + break; + case COMPAT_MTP_RECEIVE_FILE: + cmd = MTP_RECEIVE_FILE; + send_file = true; + break; + case COMPAT_MTP_SEND_FILE_WITH_HEADER: + cmd = MTP_SEND_FILE_WITH_HEADER; + send_file = true; + break; + case COMPAT_MTP_SEND_EVENT: + cmd = MTP_SEND_EVENT; + break; + default: + DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code); + ret = -ENOIOCTLCMD; + goto fail; } + if (send_file) { + if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) { + ret = -EFAULT; + goto fail; + } + mfr.fd = cmfr.fd; + mfr.offset = cmfr.offset; + mfr.length = cmfr.length; + mfr.command = cmfr.command; + 
mfr.transaction_id = cmfr.transaction_id; + ret = mtp_send_receive_ioctl(fp, cmd, &mfr); + } else { + if (mtp_lock(&dev->ioctl_excl)) + return -EBUSY; + /* return here so we don't change dev->state below, + * which would interfere with bulk transfer state. + */ + if (copy_from_user(&cevent, (void __user *)value, + sizeof(cevent))) { + ret = -EFAULT; + goto fail; + } + event.length = cevent.length; + event.data = compat_ptr(cevent.data); + ret = mtp_send_event(dev, &event); + mtp_unlock(&dev->ioctl_excl); + } fail: - spin_lock_irq(&dev->lock); - if (dev->state == STATE_CANCELED) - ret = -ECANCELED; - else if (dev->state != STATE_OFFLINE) - dev->state = STATE_READY; - spin_unlock_irq(&dev->lock); -out: - mtp_unlock(&dev->ioctl_excl); - DBG(dev->cdev, "ioctl returning %d\n", ret); return ret; } +#endif static int mtp_open(struct inode *ip, struct file *fp) { printk(KERN_INFO "mtp_open\n"); - if (mtp_lock(&_mtp_dev->open_excl)) + if (mtp_lock(&_mtp_dev->open_excl)) { + pr_err("%s mtp_release not called returning EBUSY\n", __func__); return -EBUSY; + } /* clear any error condition */ if (_mtp_dev->state != STATE_OFFLINE) @@ -1080,6 +1262,9 @@ static const struct file_operations mtp_fops = { .read = mtp_read, .write = mtp_write, .unlocked_ioctl = mtp_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_mtp_ioctl, +#endif .open = mtp_open, .release = mtp_release, }; @@ -1200,7 +1385,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f) struct mtp_instance *fi_mtp; dev->cdev = cdev; - DBG(cdev, "mtp_function_bind dev: %p\n", dev); + DBG(cdev, "%s dev: %pK\n", __func__, dev); /* allocate interface ID(s) */ id = usb_interface_id(c, f); @@ -1349,6 +1534,120 @@ static void mtp_function_disable(struct usb_function *f) VDBG(cdev, "%s disabled\n", dev->function.name); } +static int debug_mtp_read_stats(struct seq_file *s, void *unused) +{ + struct mtp_dev *dev = _mtp_dev; + int i; + unsigned long flags; + unsigned int min, max = 0, sum = 0, iteration = 0; + 
+ seq_puts(s, "\n=======================\n"); + seq_puts(s, "MTP Write Stats:\n"); + seq_puts(s, "\n=======================\n"); + spin_lock_irqsave(&dev->lock, flags); + min = dev->perf[0].vfs_wtime; + for (i = 0; i < MAX_ITERATION; i++) { + seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n", + dev->perf[i].vfs_wbytes, + dev->perf[i].vfs_wtime); + if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) { + sum += dev->perf[i].vfs_wtime; + if (min > dev->perf[i].vfs_wtime) + min = dev->perf[i].vfs_wtime; + if (max < dev->perf[i].vfs_wtime) + max = dev->perf[i].vfs_wtime; + iteration++; + } + } + + seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n", + min, max, sum / iteration); + min = max = sum = iteration = 0; + seq_puts(s, "\n=======================\n"); + seq_puts(s, "MTP Read Stats:\n"); + seq_puts(s, "\n=======================\n"); + + min = dev->perf[0].vfs_rtime; + for (i = 0; i < MAX_ITERATION; i++) { + seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n", + dev->perf[i].vfs_rbytes, + dev->perf[i].vfs_rtime); + if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) { + sum += dev->perf[i].vfs_rtime; + if (min > dev->perf[i].vfs_rtime) + min = dev->perf[i].vfs_rtime; + if (max < dev->perf[i].vfs_rtime) + max = dev->perf[i].vfs_rtime; + iteration++; + } + } + + seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n", + min, max, sum / iteration); + spin_unlock_irqrestore(&dev->lock, flags); + return 0; +} + +static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int clear_stats; + unsigned long flags; + struct mtp_dev *dev = _mtp_dev; + + if (buf == NULL) { + pr_err("[%s] EINVAL\n", __func__); + goto done; + } + + if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) { + pr_err("Wrong value. 
To clear stats, enter value as 0.\n"); + goto done; + } + + spin_lock_irqsave(&dev->lock, flags); + memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0])); + dev->dbg_read_index = 0; + dev->dbg_write_index = 0; + spin_unlock_irqrestore(&dev->lock, flags); +done: + return count; +} + +static int debug_mtp_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_mtp_read_stats, inode->i_private); +} + +static const struct file_operations debug_mtp_ops = { + .open = debug_mtp_open, + .read = seq_read, + .write = debug_mtp_reset_stats, +}; + +struct dentry *dent_mtp; +static void mtp_debugfs_init(void) +{ + struct dentry *dent_mtp_status; + + dent_mtp = debugfs_create_dir("usb_mtp", 0); + if (!dent_mtp || IS_ERR(dent_mtp)) + return; + + dent_mtp_status = debugfs_create_file("status", 0644, + dent_mtp, 0, &debug_mtp_ops); + if (!dent_mtp_status || IS_ERR(dent_mtp_status)) { + debugfs_remove(dent_mtp); + dent_mtp = NULL; + return; + } +} + +static void mtp_debugfs_remove(void) +{ + debugfs_remove_recursive(dent_mtp); +} + static int __mtp_setup(struct mtp_instance *fi_mtp) { struct mtp_dev *dev; @@ -1385,6 +1684,7 @@ static int __mtp_setup(struct mtp_instance *fi_mtp) if (ret) goto err2; + mtp_debugfs_init(); return 0; err2: @@ -1409,6 +1709,7 @@ static void mtp_cleanup(void) if (!dev) return; + mtp_debugfs_remove(); misc_deregister(&mtp_device); destroy_workqueue(dev->wq); _mtp_dev = NULL; @@ -1567,6 +1868,7 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, dev->function.disable = mtp_function_disable; dev->function.setup = mtp_ctrlreq_configfs; dev->function.free_func = mtp_free; + fi->f = &dev->function; return &dev->function; } diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c index d43e86cea74f34253c4f7bbbaa73e8f42cf7ffae..649ff4da5ba3417975adeef8f2884c70af0257ec 100644 --- a/drivers/usb/gadget/function/f_obex.c +++ b/drivers/usb/gadget/function/f_obex.c @@ -377,7 
+377,7 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); + ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status); return status; } diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 3971bbab88bd7ccf0282cd4a2ade6d636cfa1c8b..147a958be7caa62c6bff5909adb90fd35669bec9 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -402,11 +402,14 @@ int u_audio_start_capture(struct g_audio *audio_dev) struct usb_ep *ep; struct uac_rtd_params *prm; struct uac_params *params = &audio_dev->params; - int req_len, i; + int req_len, i, ret; ep = audio_dev->out_ep; prm = &uac->c_prm; - config_ep_by_speed(gadget, &audio_dev->func, ep); + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); + if (ret) + return ret; + req_len = prm->max_psize; prm->ep_enabled = true; @@ -455,11 +458,13 @@ int u_audio_start_playback(struct g_audio *audio_dev) struct uac_params *params = &audio_dev->params; unsigned int factor, rate; const struct usb_endpoint_descriptor *ep_desc; - int req_len, i; + int req_len, i, ret; ep = audio_dev->in_ep; prm = &uac->p_prm; - config_ep_by_speed(gadget, &audio_dev->func, ep); + ret = config_ep_by_speed(gadget, &audio_dev->func, ep); + if (ret) + return ret; ep_desc = ep->desc; diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 4176216d54be7589a1a84b2d39ce7d803c4d3299..8435ca0b255259a87274dc1d2371c7bb49901224 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1301,10 +1301,14 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) } tty_port_init(&port->port); + tty_buffer_set_limit(&port->port, 8388608); spin_lock_init(&port->port_lock); init_waitqueue_head(&port->drain_wait); init_waitqueue_head(&port->close_wait); + pr_debug("%s open:ttyGS%d and set 
8388608, avail:%d\n", __func__, + port_num, tty_buffer_space_avail(&port->port)); + tasklet_init(&port->push, gs_rx_push, (unsigned long) port); INIT_LIST_HEAD(&port->read_pool); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index b52c8955209b94dfdbfd964f0ab0fcbcc16d676e..e585e5096100c892233810330b44bb7efb8f2f05 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -274,8 +274,6 @@ static int xhci_plat_probe(struct platform_device *pdev) if (!hcd) return -ENOMEM; - hcd_to_bus(hcd)->skip_resume = true; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { @@ -331,14 +329,18 @@ static int xhci_plat_probe(struct platform_device *pdev) goto disable_clk; } - hcd_to_bus(xhci->shared_hcd)->skip_resume = true; - if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) xhci->quirks |= XHCI_LPM_SUPPORT; if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) xhci->quirks |= XHCI_BROKEN_PORT_PED; + if (!device_property_read_bool(&pdev->dev, + "host-poweroff-in-pm-suspend")) { + hcd_to_bus(hcd)->skip_resume = true; + hcd_to_bus(xhci->shared_hcd)->skip_resume = true; + } + if (device_property_read_u32(&pdev->dev, "xhci-imod-value", &imod)) imod = 0; @@ -437,6 +439,51 @@ static int xhci_plat_remove(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int xhci_plat_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + /* + * 'skip_resume' will be true for targets not supporting PM suspend if + * runtimePM state is active. No need of xhci_plat PM ops in such case. 
+ */ + if (!xhci || hcd_to_bus(hcd)->skip_resume) + return 0; + + dev_dbg(dev, "xhci-plat PM suspend\n"); + + /* Disable wakeup capability */ + return xhci_suspend(xhci, false); +} + +static int xhci_plat_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + + /* xhci PM ops not required if 'skip_resume' is true */ + if (!xhci || hcd_to_bus(hcd)->skip_resume) + return 0; + + dev_dbg(dev, "xhci-plat PM resume\n"); + + ret = xhci_priv_resume_quirk(hcd); + if (ret) + return ret; + + /* resume from hibernation/power-collapse */ + ret = xhci_resume(xhci, true); + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return ret; +} +#endif + static int __maybe_unused xhci_plat_runtime_idle(struct device *dev) { /* @@ -489,7 +536,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) } static const struct dev_pm_ops xhci_plat_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL) + SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 27d267a54e68da860e55c61c1a82459cf14f29cc..8cc779d9c99890ba23421002f7719cdbcf129b86 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -662,8 +662,6 @@ static void xhci_stop(struct usb_hcd *hcd) /* Only halt host and free memory after both hcds are removed */ if (!usb_hcd_is_primary_hcd(hcd)) { - /* usb core will free this hcd shortly, unset pointer */ - xhci->shared_hcd = NULL; mutex_unlock(&xhci->mutex); return; } diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index ddddd6387f6642e5792479c535adf347f2bfb962..e108f3d82255f3d5e49fb0756a5b2c66dda98bd6 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -184,10 +184,13 @@ static ssize_t warm_reset_store(struct device *dev, struct usb_interface *intf = 
to_usb_interface(dev); struct usb_device *hdev = interface_to_usbdev(intf); struct lvs_rh *lvs = usb_get_intfdata(intf); + int port; int ret; - ret = lvs_rh_set_port_feature(hdev, lvs->portnum, - USB_PORT_FEAT_BH_PORT_RESET); + if (kstrtoint(buf, 0, &port) || port < 1 || port > 255) + port = lvs->portnum; + + ret = lvs_rh_set_port_feature(hdev, port, USB_PORT_FEAT_BH_PORT_RESET); if (ret < 0) { dev_err(dev, "can't issue warm reset %d\n", ret); return ret; @@ -299,10 +302,14 @@ static ssize_t enable_compliance_store(struct device *dev, struct usb_interface *intf = to_usb_interface(dev); struct usb_device *hdev = interface_to_usbdev(intf); struct lvs_rh *lvs = usb_get_intfdata(intf); + int port; int ret; + if (kstrtoint(buf, 0, &port) || port < 1 || port > 255) + port = lvs->portnum; + ret = lvs_rh_set_port_feature(hdev, - lvs->portnum | USB_SS_PORT_LS_COMP_MOD << 3, + port | (USB_SS_PORT_LS_COMP_MOD << 3), USB_PORT_FEAT_LINK_STATE); if (ret < 0) { dev_err(dev, "can't enable compliance mode %d\n", ret); diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 901f01435a9d0f817e1f2ec780d33aea4dd3cd77..bd82fb9d17043327610c9193d28ef0d97b65fa13 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -550,6 +550,10 @@ static inline void start_usb_peripheral(struct usbpd *pd) val.intval = 1; extcon_set_property(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS, val); + val.intval = pd->typec_mode > POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ? 
1 : 0; + extcon_set_property(pd->extcon, EXTCON_USB, + EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT, val); + extcon_set_state_sync(pd->extcon, EXTCON_USB, 1); } @@ -593,15 +597,18 @@ static int usbpd_release_ss_lane(struct usbpd *pd, goto err_exit; } - if (pd->peer_usb_comm) { - ret = extcon_blocking_sync(pd->extcon, EXTCON_USB_HOST, 0); - if (ret) { - usbpd_err(&pd->dev, "err(%d) for releasing ss lane", - ret); - goto err_exit; - } + stop_usb_host(pd); + + /* blocks until USB host is completely stopped */ + ret = extcon_blocking_sync(pd->extcon, EXTCON_USB_HOST, 0); + if (ret) { + usbpd_err(&pd->dev, "err(%d) stopping host", ret); + goto err_exit; } + if (pd->peer_usb_comm) + start_usb_host(pd, false); + pd->ss_lane_svid = hdlr->svid; /* DP 4 Lane mode */ @@ -1194,7 +1201,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) .msg_rx_cb = phy_msg_received, .shutdown_cb = phy_shutdown, .frame_filter_val = FRAME_FILTER_EN_SOP | - FRAME_FILTER_EN_SOPI | FRAME_FILTER_EN_HARD_RESET, }; union power_supply_propval val = {0}; @@ -1256,6 +1262,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) phy_params.data_role = pd->current_dr; phy_params.power_role = pd->current_pr; + if (pd->vconn_enabled) + phy_params.frame_filter_val |= + FRAME_FILTER_EN_SOPI; + ret = pd_phy_open(&phy_params); if (ret) { WARN_ON_ONCE(1); @@ -1314,8 +1324,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) if (PD_RDO_OBJ_POS(pd->rdo) != 1 || PD_RDO_FIXED_CURR(pd->rdo) > - PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps) || - PD_RDO_FIXED_CURR_MINMAX(pd->rdo) > PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) { /* send Reject */ ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG); @@ -1445,6 +1453,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) phy_params.data_role = pd->current_dr; phy_params.power_role = pd->current_pr; + if (pd->vconn_enabled) + phy_params.frame_filter_val |= + 
FRAME_FILTER_EN_SOPI; + ret = pd_phy_open(&phy_params); if (ret) { WARN_ON_ONCE(1); @@ -2003,9 +2015,19 @@ static void vconn_swap(struct usbpd *pd) int ret; if (pd->vconn_enabled) { + pd_phy_update_frame_filter(FRAME_FILTER_EN_SOP | + FRAME_FILTER_EN_HARD_RESET); + pd->current_state = PE_VCS_WAIT_FOR_VCONN; kick_sm(pd, VCONN_ON_TIME); } else { + if (!pd->vconn) { + pd->vconn = devm_regulator_get(pd->dev.parent, "vconn"); + if (IS_ERR(pd->vconn)) { + usbpd_err(&pd->dev, "Unable to get vconn\n"); + return; + } + } ret = regulator_enable(pd->vconn); if (ret) { usbpd_err(&pd->dev, "Unable to enable vconn\n"); @@ -2014,6 +2036,10 @@ static void vconn_swap(struct usbpd *pd) pd->vconn_enabled = true; + pd_phy_update_frame_filter(FRAME_FILTER_EN_SOP | + FRAME_FILTER_EN_SOPI | + FRAME_FILTER_EN_HARD_RESET); + /* * Small delay to ensure Vconn has ramped up. This is well * below tVCONNSourceOn (100ms) so we still send PS_RDY within @@ -2055,6 +2081,13 @@ static int enable_vbus(struct usbpd *pd) msleep(100); /* need to wait an additional tCCDebounce */ enable_reg: + if (!pd->vbus) { + pd->vbus = devm_regulator_get(pd->dev.parent, "vbus"); + if (IS_ERR(pd->vbus)) { + usbpd_err(&pd->dev, "Unable to get vbus\n"); + return -EAGAIN; + } + } ret = regulator_enable(pd->vbus); if (ret) usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret); @@ -2126,7 +2159,8 @@ static void usbpd_sm(struct work_struct *w) /* Disconnect? 
*/ if (pd->current_pr == PR_NONE) { - if (pd->current_state == PE_UNKNOWN) + if (pd->current_state == PE_UNKNOWN && + pd->current_dr == DR_NONE) goto sm_done; if (pd->vconn_enabled) { @@ -2278,6 +2312,14 @@ static void usbpd_sm(struct work_struct *w) if (!pd->vconn_enabled && pd->typec_mode == POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) { + if (!pd->vconn) { + pd->vconn = devm_regulator_get( + pd->dev.parent, "vconn"); + if (IS_ERR(pd->vconn)) { + usbpd_err(&pd->dev, "Unable to get vconn\n"); + return; + } + } ret = regulator_enable(pd->vconn); if (ret) usbpd_err(&pd->dev, "Unable to enable vconn\n"); @@ -4126,23 +4168,13 @@ struct usbpd *usbpd_create(struct device *parent) EXTCON_PROP_USB_TYPEC_POLARITY); extcon_set_property_capability(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS); + extcon_set_property_capability(pd->extcon, EXTCON_USB, + EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT); extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST, EXTCON_PROP_USB_TYPEC_POLARITY); extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST, EXTCON_PROP_USB_SS); - pd->vbus = devm_regulator_get(parent, "vbus"); - if (IS_ERR(pd->vbus)) { - ret = PTR_ERR(pd->vbus); - goto put_psy; - } - - pd->vconn = devm_regulator_get(parent, "vconn"); - if (IS_ERR(pd->vconn)) { - ret = PTR_ERR(pd->vconn); - goto put_psy; - } - pd->vconn_is_external = device_property_present(parent, "qcom,vconn-uses-external-source"); diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c index 3a945ad9cb055d1e9d14cb1abeaa714dfdc2222f..80724807cbeb5fc2377f4097fdaf9a1bf7b2a776 100644 --- a/drivers/usb/pd/qpnp-pdphy.c +++ b/drivers/usb/pd/qpnp-pdphy.c @@ -347,6 +347,14 @@ int pd_phy_update_roles(enum data_role dr, enum power_role pr) } EXPORT_SYMBOL(pd_phy_update_roles); +int pd_phy_update_frame_filter(u8 frame_filter_val) +{ + struct usb_pdphy *pdphy = __pdphy; + + return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, frame_filter_val); +} +EXPORT_SYMBOL(pd_phy_update_frame_filter); + int 
pd_phy_open(struct pd_phy_params *params) { int ret; diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h index fbddd747514a6beda479c5c6c62cd38d2e675bfe..5c7c5cdf4c7da53a8b9972e2f705226628b2e46b 100644 --- a/drivers/usb/pd/usbpd.h +++ b/drivers/usb/pd/usbpd.h @@ -77,6 +77,7 @@ int pd_phy_signal(enum pd_sig_type sig); int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop); int pd_phy_update_roles(enum data_role dr, enum power_role pr); +int pd_phy_update_frame_filter(u8 frame_filter_val); void pd_phy_close(void); #else static inline int pd_phy_open(struct pd_phy_params *params) @@ -100,6 +101,11 @@ static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr) return -ENODEV; } +static inline int pd_phy_update_frame_filter(u8 frame_filter_val) +{ + return -ENODEV; +} + static inline void pd_phy_close(void) { } diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c index 78bdd13a78aec61c5483b8c45157d67e3dfcad26..405f2386c98a811a5c0736f3ee4104149de98466 100644 --- a/drivers/usb/phy/phy-msm-snps-hs.c +++ b/drivers/usb/phy/phy-msm-snps-hs.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -27,16 +28,17 @@ #include #include #include +#include #define USB2_PHY_USB_PHY_UTMI_CTRL0 (0x3c) +#define OPMODE_MASK (0x3 << 3) +#define OPMODE_NONDRIVING (0x1 << 3) #define SLEEPM BIT(0) #define USB2_PHY_USB_PHY_UTMI_CTRL5 (0x50) -#define ATERESET BIT(0) #define POR BIT(1) #define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0 (0x54) -#define VATESTENB_MASK (0x3 << 0) #define RETENABLEN BIT(3) #define FSEL_MASK (0x7 << 4) #define FSEL_DEFAULT (0x3 << 4) @@ -55,14 +57,8 @@ #define USB2_SUSPEND_N BIT(2) #define USB2_SUSPEND_N_SEL BIT(3) -#define USB2_PHY_USB_PHY_HS_PHY_TEST0 (0x80) -#define TESTDATAIN_MASK (0xff << 0) - -#define USB2_PHY_USB_PHY_HS_PHY_TEST1 (0x84) -#define TESTDATAOUTSEL BIT(4) -#define TOGGLE_2WR BIT(6) - #define USB2_PHY_USB_PHY_CFG0 (0x94) +#define 
UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN BIT(0) #define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1) #define USB2_PHY_USB_PHY_REFCLK_CTRL (0xa0) @@ -72,6 +68,11 @@ #define USB2PHY_USB_PHY_RTUNE_SEL (0xb4) #define RTUNE_SEL BIT(0) +#define TXPREEMPAMPTUNE0(x) (x << 6) +#define TXPREEMPAMPTUNE0_MASK (BIT(7) | BIT(6)) +#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1 0x70 +#define TXVREFTUNE0_MASK 0xF + #define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */ #define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */ #define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */ @@ -98,6 +99,7 @@ struct msm_hsphy { bool power_enabled; bool suspended; bool cable_connected; + bool dpdm_enable; int *param_override_seq; int param_override_seq_cnt; @@ -105,12 +107,21 @@ struct msm_hsphy { void __iomem *phy_rcal_reg; u32 rcal_mask; + struct mutex phy_lock; + struct regulator_desc dpdm_rdesc; + struct regulator_dev *dpdm_rdev; + /* emulation targets specific */ void __iomem *emu_phy_base; int *emu_init_seq; int emu_init_seq_len; int *emu_dcm_reset_seq; int emu_dcm_reset_seq_len; + + /* debugfs entries */ + struct dentry *root; + u8 txvref_tune0; + u8 pre_emphasis; }; static void msm_hsphy_enable_clocks(struct msm_hsphy *phy, bool on) @@ -372,7 +383,8 @@ static int msm_hsphy_init(struct usb_phy *uphy) msm_hsphy_reset(phy); msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0, - UTMI_PHY_CMN_CTRL_OVERRIDE_EN, UTMI_PHY_CMN_CTRL_OVERRIDE_EN); + UTMI_PHY_CMN_CTRL_OVERRIDE_EN, + UTMI_PHY_CMN_CTRL_OVERRIDE_EN); msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5, POR, POR); @@ -397,6 +409,23 @@ static int msm_hsphy_init(struct usb_phy *uphy) hsusb_phy_write_seq(phy->base, phy->param_override_seq, phy->param_override_seq_cnt, 0); + if (phy->pre_emphasis) { + u8 val = TXPREEMPAMPTUNE0(phy->pre_emphasis) & + TXPREEMPAMPTUNE0_MASK; + if (val) + msm_usb_write_readback(phy->base, + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1, + TXPREEMPAMPTUNE0_MASK, val); + } + + if (phy->txvref_tune0) { + u8 val = phy->txvref_tune0 & 
TXVREFTUNE0_MASK; + + msm_usb_write_readback(phy->base, + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1, + TXVREFTUNE0_MASK, val); + } + if (phy->phy_rcal_reg) { rcal_code = readl_relaxed(phy->phy_rcal_reg) & phy->rcal_mask; @@ -412,26 +441,9 @@ static int msm_hsphy_init(struct usb_phy *uphy) msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2, VREGBYPASS, VREGBYPASS); - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5, - ATERESET, ATERESET); - - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1, - TESTDATAOUTSEL, TESTDATAOUTSEL); - - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1, - TOGGLE_2WR, TOGGLE_2WR); - - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0, - VATESTENB_MASK, 0); - - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST0, - TESTDATAIN_MASK, 0); - - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, - USB2_SUSPEND_N_SEL, USB2_SUSPEND_N_SEL); - msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, - USB2_SUSPEND_N, USB2_SUSPEND_N); + USB2_SUSPEND_N_SEL | USB2_SUSPEND_N, + USB2_SUSPEND_N_SEL | USB2_SUSPEND_N); msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0, SLEEPM, SLEEPM); @@ -443,7 +455,7 @@ static int msm_hsphy_init(struct usb_phy *uphy) USB2_SUSPEND_N_SEL, 0); msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0, - UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0); + UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0); return 0; } @@ -463,8 +475,14 @@ static int msm_hsphy_set_suspend(struct usb_phy *uphy, int suspend) (phy->phy.flags & PHY_HOST_MODE)) { msm_hsphy_enable_clocks(phy, false); } else {/* Cable disconnect */ - msm_hsphy_enable_clocks(phy, false); - msm_hsphy_enable_power(phy, false); + mutex_lock(&phy->phy_lock); + if (!phy->dpdm_enable) { + msm_hsphy_enable_clocks(phy, false); + msm_hsphy_enable_power(phy, false); + } else { + dev_dbg(uphy->dev, "dpdm reg still active. 
Keep clocks/ldo ON\n"); + } + mutex_unlock(&phy->phy_lock); } phy->suspended = true; } else { /* Bus resume and cable connect */ @@ -495,6 +513,121 @@ static int msm_hsphy_notify_disconnect(struct usb_phy *uphy, return 0; } +static int msm_hsphy_dpdm_regulator_enable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_hsphy *phy = rdev_get_drvdata(rdev); + + dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n", + __func__, phy->dpdm_enable); + + mutex_lock(&phy->phy_lock); + if (!phy->dpdm_enable) { + ret = msm_hsphy_enable_power(phy, true); + if (ret) { + mutex_unlock(&phy->phy_lock); + return ret; + } + + msm_hsphy_enable_clocks(phy, true); + msm_hsphy_reset(phy); + + /* + * For PMIC charger detection, place PHY in UTMI non-driving + * mode which leaves Dp and Dm lines in high-Z state. + */ + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, + USB2_SUSPEND_N_SEL | USB2_SUSPEND_N, + USB2_SUSPEND_N_SEL | USB2_SUSPEND_N); + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0, + OPMODE_MASK, OPMODE_NONDRIVING); + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0, + UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN, + UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN); + + phy->dpdm_enable = true; + } + mutex_unlock(&phy->phy_lock); + + return ret; +} + +static int msm_hsphy_dpdm_regulator_disable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_hsphy *phy = rdev_get_drvdata(rdev); + + dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n", + __func__, phy->dpdm_enable); + + mutex_lock(&phy->phy_lock); + if (phy->dpdm_enable) { + if (!phy->cable_connected) { + msm_hsphy_enable_clocks(phy, false); + ret = msm_hsphy_enable_power(phy, false); + if (ret < 0) { + mutex_unlock(&phy->phy_lock); + return ret; + } + } + phy->dpdm_enable = false; + } + mutex_unlock(&phy->phy_lock); + + return ret; +} + +static int msm_hsphy_dpdm_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct msm_hsphy *phy = rdev_get_drvdata(rdev); + + dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n", + 
__func__, phy->dpdm_enable); + + return phy->dpdm_enable; +} + +static struct regulator_ops msm_hsphy_dpdm_regulator_ops = { + .enable = msm_hsphy_dpdm_regulator_enable, + .disable = msm_hsphy_dpdm_regulator_disable, + .is_enabled = msm_hsphy_dpdm_regulator_is_enabled, +}; + +static int msm_hsphy_regulator_init(struct msm_hsphy *phy) +{ + struct device *dev = phy->phy.dev; + struct regulator_config cfg = {}; + struct regulator_init_data *init_data; + + init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL); + if (!init_data) + return -ENOMEM; + + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS; + phy->dpdm_rdesc.owner = THIS_MODULE; + phy->dpdm_rdesc.type = REGULATOR_VOLTAGE; + phy->dpdm_rdesc.ops = &msm_hsphy_dpdm_regulator_ops; + phy->dpdm_rdesc.name = kbasename(dev->of_node->full_name); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.driver_data = phy; + cfg.of_node = dev->of_node; + + phy->dpdm_rdev = devm_regulator_register(dev, &phy->dpdm_rdesc, &cfg); + if (IS_ERR(phy->dpdm_rdev)) + return PTR_ERR(phy->dpdm_rdev); + + return 0; +} + +static void msm_hsphy_create_debugfs(struct msm_hsphy *phy) +{ + phy->root = debugfs_create_dir(dev_name(phy->phy.dev), NULL); + debugfs_create_x8("pre_emphasis", 0644, phy->root, &phy->pre_emphasis); + debugfs_create_x8("txvref_tune0", 0644, phy->root, &phy->txvref_tune0); +} + static int msm_hsphy_probe(struct platform_device *pdev) { struct msm_hsphy *phy; @@ -680,6 +813,7 @@ static int msm_hsphy_probe(struct platform_device *pdev) goto err_ret; } + mutex_init(&phy->phy_lock); platform_set_drvdata(pdev, phy); if (phy->emu_init_seq) @@ -695,6 +829,14 @@ static int msm_hsphy_probe(struct platform_device *pdev) if (ret) return ret; + ret = msm_hsphy_regulator_init(phy); + if (ret) { + usb_remove_phy(&phy->phy); + return ret; + } + + msm_hsphy_create_debugfs(phy); + return 0; err_ret: @@ -708,6 +850,8 @@ static int msm_hsphy_remove(struct platform_device *pdev) if (!phy) return 0; + 
debugfs_remove_recursive(phy->root); + usb_remove_phy(&phy->phy); clk_disable_unprepare(phy->ref_clk_src); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d4124551fb56cacf50190ce9b10a7e1975701c46..0600dadd6a0c8e59b4af2242231c57e81959574c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -199,6 +199,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ +#define DELL_PRODUCT_5821E 0x81d7 + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -1033,6 +1035,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 34c5a75f98a750427a0fd4065060a3acddb8aec1..2153e67eeeeebc8eb5a962e687224cdc1374fc4e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -55,6 +55,8 @@ static const struct usb_device_id id_table[] = { .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC232B), + .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, { 
USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 123289085ee25974c1045adaa7fa56dd11198169..cec7141245ef6afb884dcc9b6d8ca53841f31805 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -29,6 +29,7 @@ #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 #define ATEN_PRODUCT_UC485 0x2021 +#define ATEN_PRODUCT_UC232B 0x2022 #define ATEN_PRODUCT_ID2 0x2118 #define IODATA_VENDOR_ID 0x04bb diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 4c4ac4705ac03f8d850a653fe91e35b78bfe25b6..a9c5564b6b65bc7d25e4ef47b50e463731d54aa9 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -773,9 +773,9 @@ static void sierra_close(struct usb_serial_port *port) kfree(urb->transfer_buffer); usb_free_urb(urb); usb_autopm_put_interface_async(serial->interface); - spin_lock(&portdata->lock); + spin_lock_irq(&portdata->lock); portdata->outstanding_urbs--; - spin_unlock(&portdata->lock); + spin_unlock_irq(&portdata->lock); } sierra_stop_rx_urbs(port); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 244e5256c526f876b3afd2c41cdd333e7183dd4a..3cf74f54c7a1e2645f12c71c06e2c4da96a47044 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1578,9 +1578,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) d->iotlb = niotlb; for (i = 0; i < d->nvqs; ++i) { - mutex_lock(&d->vqs[i]->mutex); - d->vqs[i]->iotlb = niotlb; - mutex_unlock(&d->vqs[i]->mutex); + struct vhost_virtqueue *vq = d->vqs[i]; + + mutex_lock(&vq->mutex); + vq->iotlb = niotlb; + __vhost_vq_meta_reset(vq); + mutex_unlock(&vq->mutex); } vhost_umem_clean(oiotlb); diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c index e09d700f3c87c91e3ce2ec82dabebc73ba6670ce..a7393cce74ff04599f0c1d65cdb3e4d16c5b6d85 100644 --- 
a/drivers/video/backlight/qcom-spmi-wled.c +++ b/drivers/video/backlight/qcom-spmi-wled.c @@ -2304,6 +2304,8 @@ static int wled_probe(struct platform_device *pdev) return rc; } + mutex_init(&wled->lock); + val = WLED_DEFAULT_BRIGHTNESS; of_property_read_u32(pdev->dev.of_node, "default-brightness", &val); wled->brightness = val; @@ -2333,7 +2335,6 @@ static int wled_probe(struct platform_device *pdev) return rc; } - mutex_init(&wled->lock); platform_set_drvdata(pdev, wled); memset(&props, 0, sizeof(struct backlight_properties)); diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c index fba6ab8c5eff3162ef7ff53e9dd1705a4fa25d19..addb982ca987947093f5e596fa4e9da27372290f 100644 --- a/drivers/video/fbdev/msm/mdss_debug.c +++ b/drivers/video/fbdev/msm/mdss_debug.c @@ -420,6 +420,39 @@ static int mdss_debug_base_release(struct inode *inode, struct file *file) return 0; } +/** + * mdss_debug_base_is_valid_range - verify if requested memory range is valid + * @off: address offset in bytes + * @cnt: memory size in bytes + * Return: true if valid; false otherwise + */ +static bool mdss_debug_base_is_valid_range(u32 off, u32 cnt) +{ + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); + struct mdss_debug_data *mdd = mdata->debug_inf.debug_data; + struct range_dump_node *node; + struct mdss_debug_base *base; + + pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt); + + list_for_each_entry(base, &mdd->base_list, head) { + list_for_each_entry(node, &base->dump_list, head) { + pr_debug("%s: start=0x%x end=0x%x\n", node->range_name, + node->offset.start, node->offset.end); + + if (node->offset.start <= off + && off <= node->offset.end + && off + cnt <= node->offset.end) { + pr_debug("valid range requested\n"); + return true; + } + } + } + + pr_err("invalid range requested\n"); + return false; +} + static ssize_t mdss_debug_base_offset_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { @@ -439,6 +472,9 @@ static 
ssize_t mdss_debug_base_offset_write(struct file *file, buf[count] = 0; /* end of string */ + if (sscanf(buf, "%5x %x", &off, &cnt) != 2) + return -EFAULT; + if (off % sizeof(u32)) return -EINVAL; @@ -451,6 +487,9 @@ static ssize_t mdss_debug_base_offset_write(struct file *file, if (cnt > (dbg->max_offset - off)) cnt = dbg->max_offset - off; + if (!mdss_debug_base_is_valid_range(off, cnt)) + return -EINVAL; + mutex_lock(&mdss_debug_lock); dbg->off = off; dbg->cnt = cnt; diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 93cd5e567d07f138f1eb22b2626784372ad8667c..870fd287f97fc6099593a333e0919e9638c1c29a 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -131,7 +131,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, bio_set_dev(bio, inode->i_sb->s_bdev); bio->bi_iter.bi_sector = pblk << (inode->i_sb->s_blocksize_bits - 9); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT); ret = bio_add_page(bio, ciphertext_page, inode->i_sb->s_blocksize, 0); if (ret != inode->i_sb->s_blocksize) { diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 737af4b41f711ac3c449d2e410108bc39e4d8c88..68ac38746a7811ef39b5bf47d0877567e8ca8317 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -117,7 +117,6 @@ static int validate_user_key(struct fscrypt_info *crypt_info, res = -ENOKEY; goto out; } - res = derive_key_aes(ctx->nonce, master_key, crypt_info->ci_raw_key); /* If we don't need to derive, we still want to do everything * up until now to validate the key. It's cleaner to fail now * than to fail in block I/O. 
diff --git a/fs/dcache.c b/fs/dcache.c index b88eee4dbbc5b877b3cf5042a7918d1ccbc32428..dabbd665cc901915f6b33ac0f311af4f531d09f2 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -357,14 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry) __releases(dentry->d_inode->i_lock) { struct inode *inode = dentry->d_inode; - bool hashed = !d_unhashed(dentry); - if (hashed) - raw_write_seqcount_begin(&dentry->d_seq); + raw_write_seqcount_begin(&dentry->d_seq); __d_clear_type_and_inode(dentry); hlist_del_init(&dentry->d_u.d_alias); - if (hashed) - raw_write_seqcount_end(&dentry->d_seq); + raw_write_seqcount_end(&dentry->d_seq); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); if (!inode->i_nlink) @@ -1922,10 +1919,12 @@ struct dentry *d_make_root(struct inode *root_inode) if (root_inode) { res = __d_alloc(root_inode->i_sb, NULL); - if (res) + if (res) { + res->d_flags |= DCACHE_RCUACCESS; d_instantiate(res, root_inode); - else + } else { iput(root_inode); + } } return res; } diff --git a/fs/direct-io.c b/fs/direct-io.c index c03813a80a349f7a0f88059d64fcc88e158ccc2a..e248910ad05ef0d61fdbe0e20e45150915990955 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -37,6 +37,8 @@ #include #include #include +#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION) +#include /* * How many user pages to map in one call to get_user_pages(). This determines @@ -430,6 +432,21 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; } +static bool is_inode_filesystem_type(const struct inode *inode, + const char *fs_type) +{ + if (!inode || !fs_type) + return false; + + if (!inode->i_sb) + return false; + + if (!inode->i_sb->s_type) + return false; + + return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); +} + /* * In the AIO read case we speculatively dirty the pages before starting IO. 
* During IO completion, any of these pages which happen to have been written @@ -454,6 +471,14 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) dio->bio_disk = bio->bi_disk; bio->bi_dio_inode = dio->inode; +/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */ +#define PG_DUN_NEW(i,p) \ + (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff)) + + if (is_inode_filesystem_type(dio->inode, "f2fs")) + fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode, + (sdio->logical_offset_in_bio >> PAGE_SHIFT))); + if (sdio->submit_io) { sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); dio->bio_cookie = BLK_QC_T_NONE; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 69e83cf4c69936a7668b9662d5093f67965a3f43..af982d472cb1c9856095d6957cbc7ae9f527b9e1 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -205,7 +205,10 @@ typedef struct ext4_io_end { ssize_t size; /* size of the extent */ } ext4_io_end_t; +#define EXT4_IO_ENCRYPTED 1 + struct ext4_io_submit { + unsigned int io_flags; struct writeback_control *io_wbc; struct bio *io_bio; ext4_io_end_t *io_end; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 332d0224f595f52339522bc4a90c77bdc7e2ab81..9d723ce4f93ad54bc6d4db25e89734af2fea51d5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1215,11 +1215,12 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { - ll_rw_block(REQ_OP_READ, 0, 1, &bh); - *wait_bh++ = bh; decrypt = ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) && !fscrypt_using_hardware_encryption(inode); + ll_rw_block(REQ_OP_READ, (decrypt ? 
REQ_NOENCRYPT : 0), + 1, &bh); + *wait_bh++ = bh; } } /* @@ -3989,6 +3990,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; + bool decrypt; int err = 0; page = find_or_create_page(mapping, from >> PAGE_SHIFT, @@ -4031,14 +4033,15 @@ static int __ext4_block_zero_page_range(handle_t *handle, if (!buffer_uptodate(bh)) { err = -EIO; - ll_rw_block(REQ_OP_READ, 0, 1, &bh); + decrypt = S_ISREG(inode->i_mode) && + ext4_encrypted_inode(inode) && + !fscrypt_using_hardware_encryption(inode); + ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (S_ISREG(inode->i_mode) && - ext4_encrypted_inode(inode) && - !fscrypt_using_hardware_encryption(inode)) { + if (decrypt) { /* We expect the key to be set. */ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 11566757db265c0763d8478b675337a718e4f2a9..b38e6b8198d65b00d2101f6524962da85c941414 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -352,6 +352,8 @@ void ext4_io_submit(struct ext4_io_submit *io) int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0; io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint; + if (io->io_flags & EXT4_IO_ENCRYPTED) + io_op_flags |= REQ_NOENCRYPT; bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags); submit_bio(io->io_bio); } @@ -361,6 +363,7 @@ void ext4_io_submit(struct ext4_io_submit *io) void ext4_io_submit_init(struct ext4_io_submit *io, struct writeback_control *wbc) { + io->io_flags = 0; io->io_wbc = wbc; io->io_bio = NULL; io->io_end = NULL; @@ -504,6 +507,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; + if (data_page) + io->io_flags |= EXT4_IO_ENCRYPTED; ret = io_submit_add_bh(io, inode, data_page ? 
data_page : page, bh); if (ret) { diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 039aa698159aea57d1f4f0a8f05fb1d4ea1537b4..335b03fa81f92f9ad458235c14888611d3f4f379 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -298,7 +298,8 @@ int ext4_mpage_readpages(struct address_space *mapping, bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); bio->bi_end_io = mpage_end_io; bio->bi_private = ctx; - bio_set_op_attrs(bio, REQ_OP_READ, 0); + bio_set_op_attrs(bio, REQ_OP_READ, + ctx ? REQ_NOENCRYPT : 0); } length = first_hole << blkbits; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5951e6316ead377640a7dde1062ad2a0cd190c8c..bd4fc9d96cd42ddf03fe0cd82129d2e6bf4c5bfd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -68,6 +68,7 @@ static void ext4_mark_recovery_complete(struct super_block *sb, static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); +static void ext4_umount_end(struct super_block *sb, int flags); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); @@ -1315,6 +1316,7 @@ static const struct super_operations ext4_sops = { .freeze_fs = ext4_freeze, .unfreeze_fs = ext4_unfreeze, .statfs = ext4_statfs, + .umount_end = ext4_umount_end, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA @@ -5027,6 +5029,25 @@ struct ext4_mount_options { #endif }; +static void ext4_umount_end(struct super_block *sb, int flags) +{ + /* + * this is called at the end of umount(2). If there is an unclosed + * namespace, ext4 won't do put_super() which triggers fsck in the + * next boot. 
+ */ + if ((flags & MNT_FORCE) || atomic_read(&sb->s_active) > 1) { + ext4_msg(sb, KERN_ERR, + "errors=remount-ro for active namespaces on umount %x", + flags); + clear_opt(sb, ERRORS_PANIC); + set_opt(sb, ERRORS_RO); + /* to write the latest s_kbytes_written */ + if (!(sb->s_flags & MS_RDONLY)) + ext4_commit_super(sb, 1); + } +} + static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index d503bdfd3c57801d8b88d4a20e53083532fe3cda..abe6d6dc40dc6fcfa9ded0d11e2fedf71fd2c933 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -457,6 +457,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) bio_put(bio); return -EFAULT; } + fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0; bio_set_op_attrs(bio, fio->op, fio->op_flags); __submit_bio(fio->sbi, bio, fio->type); @@ -503,6 +504,7 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio) dun = PG_DUN(inode, fio->page); bi_crypt_skip = fio->encrypted_page ? 1 : 0; bio_encrypted = f2fs_may_encrypt_bio(inode, fio); + fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0; /* set submitted = true as a return value */ fio->submitted = true; @@ -568,9 +570,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, return ERR_PTR(-ENOMEM); f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; - bio_set_op_attrs(bio, REQ_OP_READ, 0); + bio_set_op_attrs(bio, REQ_OP_READ, + (f2fs_encrypted_inode(inode) ? 
+ REQ_NOENCRYPT : + 0)); - if (f2fs_encrypted_file(inode)) + if (f2fs_encrypted_file(inode) && + !fscrypt_using_hardware_encryption(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (post_read_steps) { ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS); @@ -1553,9 +1559,9 @@ static int f2fs_mpage_readpages(struct address_space *mapping, bio = NULL; goto set_error_page; } + if (bio_encrypted) + fscrypt_set_ice_dun(inode, bio, dun); } - if (bio_encrypted) - fscrypt_set_ice_dun(inode, bio, dun); if (bio_add_page(bio, page, blocksize, 0) < blocksize) goto submit_and_realloc; @@ -2113,6 +2119,18 @@ static int f2fs_write_cache_pages(struct address_space *mapping, return ret; } +static inline bool __should_serialize_io(struct inode *inode, + struct writeback_control *wbc) +{ + if (!S_ISREG(inode->i_mode)) + return false; + if (wbc->sync_mode != WB_SYNC_ALL) + return true; + if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) + return true; + return false; +} + int __f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc, enum iostat_type io_type) @@ -2121,6 +2139,7 @@ int __f2fs_write_data_pages(struct address_space *mapping, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct blk_plug plug; int ret; + bool locked = false; /* deal with chardevs and other special file */ if (!mapping->a_ops->writepage) @@ -2151,10 +2170,18 @@ int __f2fs_write_data_pages(struct address_space *mapping, else if (atomic_read(&sbi->wb_sync_req)) goto skip_write; + if (__should_serialize_io(inode, wbc)) { + mutex_lock(&sbi->writepages); + locked = true; + } + blk_start_plug(&plug); ret = f2fs_write_cache_pages(mapping, wbc, io_type); blk_finish_plug(&plug); + if (locked) + mutex_unlock(&sbi->writepages); + if (wbc->sync_mode == WB_SYNC_ALL) atomic_dec(&sbi->wb_sync_req); /* diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7e320817af207e00610a4acd210bb695f2163a9e..03c0f5707205d085e6f0d13e24028ab5e79d76d9 100644 --- a/fs/f2fs/f2fs.h +++ 
b/fs/f2fs/f2fs.h @@ -887,6 +887,7 @@ struct f2fs_sm_info { unsigned int ipu_policy; /* in-place-update policy */ unsigned int min_ipu_util; /* in-place-update threshold */ unsigned int min_fsync_blocks; /* threshold for fsync */ + unsigned int min_seq_blocks; /* threshold for sequential blocks */ unsigned int min_hot_blocks; /* threshold for hot block allocation */ unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ @@ -1097,6 +1098,7 @@ struct f2fs_sb_info { struct rw_semaphore sb_lock; /* lock for raw super block */ int valid_super_block; /* valid super block no */ unsigned long s_flag; /* flags for sbi */ + struct mutex writepages; /* mutex for writepages() */ #ifdef CONFIG_BLK_DEV_ZONED unsigned int blocks_per_blkz; /* F2FS blocks per zone */ @@ -3281,7 +3283,7 @@ static inline bool f2fs_may_encrypt(struct inode *inode) static inline bool f2fs_force_buffered_io(struct inode *inode, int rw) { - return ((f2fs_post_read_required(inode) && + return ((f2fs_encrypted_file(inode) && !fscrypt_using_hardware_encryption(inode)) || (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) || F2FS_I_SB(inode)->s_ndevs); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 218d3aba46f763741fad0d89168508a731af044e..8ad3ac615c1c7c9a38559d742a09880cbd051388 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2449,23 +2449,24 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) if (err) goto out; - start_block = START_BLOCK(sbi, start_segno); - end_block = START_BLOCK(sbi, end_segno + 1); - - __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); - __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); - /* * We filed discard candidates, but actually we don't need to wait for * all of them, since they'll be issued in idle time along with runtime * discard option. User configuration looks like using runtime discard * or periodic fstrim instead of it. 
*/ - if (!test_opt(sbi, DISCARD)) { - trimmed = __wait_discard_cmd_range(sbi, &dpolicy, + if (test_opt(sbi, DISCARD)) + goto out; + + start_block = START_BLOCK(sbi, start_segno); + end_block = START_BLOCK(sbi, end_segno + 1); + + __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); + __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); + + trimmed = __wait_discard_cmd_range(sbi, &dpolicy, start_block, end_block); - range->len = F2FS_BLK_TO_BYTES(trimmed); - } + range->len = F2FS_BLK_TO_BYTES(trimmed); out: return err; } @@ -3850,6 +3851,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; + sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->min_ssr_sections = reserved_sections(sbi); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index e9ea8b64512b7b002af411b2774423efc5000204..147673487c6051ea94bbc3b8636a9ceb2b882d30 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -982,6 +982,24 @@ static void destroy_device_list(struct f2fs_sb_info *sbi) kfree(sbi->devs); } +static void f2fs_umount_end(struct super_block *sb, int flags) +{ + /* + * this is called at the end of umount(2). If there is an unclosed + * namespace, f2fs won't do put_super() which triggers fsck in the + * next boot. 
+ */ + if ((flags & MNT_FORCE) || atomic_read(&sb->s_active) > 1) { + /* to write the latest kbytes_written */ + if (!(sb->s_flags & MS_RDONLY)) { + struct cp_control cpc = { + .reason = CP_UMOUNT, + }; + write_checkpoint(F2FS_SB(sb), &cpc); + } + } +} + static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); @@ -1896,6 +1914,7 @@ static const struct super_operations f2fs_sops = { #endif .evict_inode = f2fs_evict_inode, .put_super = f2fs_put_super, + .umount_end = f2fs_umount_end, .sync_fs = f2fs_sync_fs, .freeze_fs = f2fs_freeze, .unfreeze_fs = f2fs_unfreeze, @@ -2719,6 +2738,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* init f2fs-specific super block info */ sbi->valid_super_block = valid_super_block; mutex_init(&sbi->gc_mutex); + mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); init_rwsem(&sbi->node_write); init_rwsem(&sbi->node_change); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 2c53de9251becae26f05e13d06acc23239beb3ca..26cb8550bec45d87f895c944a29178dad1008135 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -359,6 +359,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh); @@ -411,6 +412,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(ipu_policy), ATTR_LIST(min_ipu_util), ATTR_LIST(min_fsync_blocks), + ATTR_LIST(min_seq_blocks), ATTR_LIST(min_hot_blocks), ATTR_LIST(min_ssr_sections), ATTR_LIST(max_victim_search), diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 
48b2336692f9f70a3d8c230ee3a9169af5e634be..8428d4bc46a88d052195e79a5ecb64de0563b5da 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -92,7 +92,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent, err_brelse: brelse(bhs[0]); err: - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr); + fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)", + (llu)blocknr); return -EIO; } @@ -105,8 +106,9 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent, fatent->fat_inode = MSDOS_SB(sb)->fat_inode; fatent->bhs[0] = sb_bread(sb, blocknr); if (!fatent->bhs[0]) { - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", - (llu)blocknr); + fat_msg_ratelimit(sb, KERN_ERR, + "FAT read failed (blocknr %llu)", + (llu)blocknr); return -EIO; } fatent->nr_bhs = 1; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 3b40937b942a428c45907daf945c16b662e694c9..204d5ca813da3ef942ba481425f30d321078da51 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -625,10 +625,8 @@ static void fat_free_eofblocks(struct inode *inode) */ err = __fat_write_inode(inode, inode_needs_sync(inode)); if (err) { - fat_msg(inode->i_sb, KERN_WARNING, "Failed to " - "update on disk inode for unused " - "fallocated blocks, inode could be " - "corrupted. Please run fsck"); + fat_msg_ratelimit(inode->i_sb, KERN_WARNING, + "Failed to update on disk inode for unused fallocated blocks, inode could be corrupted. 
Please run fsck"); } } @@ -672,7 +670,7 @@ static void fat_set_state(struct super_block *sb, bh = sb_bread(sb, 0); if (bh == NULL) { - fat_msg(sb, KERN_ERR, "unable to read boot sector " + fat_msg_ratelimit(sb, KERN_ERR, "unable to read boot sector " "to mark fs as dirty"); return; } @@ -851,7 +849,7 @@ static int __fat_write_inode(struct inode *inode, int wait) fat_get_blknr_offset(sbi, i_pos, &blocknr, &offset); bh = sb_bread(sb, blocknr); if (!bh) { - fat_msg(sb, KERN_ERR, "unable to read inode block " + fat_msg_ratelimit(sb, KERN_ERR, "unable to read inode block " "for updating (i_pos %lld)", i_pos); return -EIO; } diff --git a/fs/fat/misc.c b/fs/fat/misc.c index acc3aa30ee54988bd99e172e3db2b8ae83067c31..23cc2676dfa6d70a4d75edd4eb49c8e13ec484b0 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c @@ -68,7 +68,7 @@ int fat_clusters_flush(struct super_block *sb) bh = sb_bread(sb, sbi->fsinfo_sector); if (bh == NULL) { - fat_msg(sb, KERN_ERR, "bread failed in fat_clusters_flush"); + fat_msg_ratelimit(sb, KERN_ERR, "bread failed in %s", __func__); return -EIO; } diff --git a/fs/file_table.c b/fs/file_table.c index 61517f57f8ef744e9915c33659cfba96c0a1f767..19dfe6159eb2b4319da8513cfcec2102e7cb4c62 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -261,6 +261,12 @@ void flush_delayed_fput(void) static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); +void flush_delayed_fput_wait(void) +{ + delayed_fput(NULL); + flush_delayed_work(&delayed_fput_work); +} + void fput(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { diff --git a/fs/namespace.c b/fs/namespace.c index 5660c128bc843a08f74e670ad8492111d50e9e9f..7adc8bb88225b1dab36b3d00cc94e68dbd92f9c4 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -21,6 +21,7 @@ #include /* get_fs_root et.al. 
*/ #include /* fsnotify_vfsmount_delete */ #include +#include #include #include #include @@ -661,12 +662,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) return 0; mnt = real_mount(bastard); mnt_add_count(mnt, 1); + smp_mb(); // see mntput_no_expire() if (likely(!read_seqretry(&mount_lock, seq))) return 0; if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { mnt_add_count(mnt, -1); return 1; } + lock_mount_hash(); + if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { + mnt_add_count(mnt, -1); + unlock_mount_hash(); + return 1; + } + unlock_mount_hash(); + /* caller will mntput() */ return -1; } @@ -1033,7 +1043,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void if (!mnt) return ERR_PTR(-ENOMEM); - mnt->mnt.data = NULL; if (type->alloc_mnt_data) { mnt->mnt.data = type->alloc_mnt_data(); if (!mnt->mnt.data) { @@ -1047,7 +1056,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void root = mount_fs(type, flags, name, &mnt->mnt, data); if (IS_ERR(root)) { - kfree(mnt->mnt.data); mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_CAST(root); @@ -1169,7 +1177,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, return mnt; out_free: - kfree(mnt->mnt.data); mnt_free_id(mnt); free_vfsmnt(mnt); return ERR_PTR(err); @@ -1213,15 +1220,36 @@ static void delayed_mntput(struct work_struct *unused) } static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); +void flush_delayed_mntput_wait(void) +{ + delayed_mntput(NULL); + flush_delayed_work(&delayed_mntput_work); +} + static void mntput_no_expire(struct mount *mnt) { rcu_read_lock(); - mnt_add_count(mnt, -1); - if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ + if (likely(READ_ONCE(mnt->mnt_ns))) { + /* + * Since we don't do lock_mount_hash() here, + * ->mnt_ns can change under us. However, if it's + * non-NULL, then there's a reference that won't + * be dropped until after an RCU delay done after + * turning ->mnt_ns NULL. 
So if we observe it + * non-NULL under rcu_read_lock(), the reference + * we are dropping is not the final one. + */ + mnt_add_count(mnt, -1); rcu_read_unlock(); return; } lock_mount_hash(); + /* + * make sure that if __legitimize_mnt() has not seen us grab + * mount_lock, we'll see their refcount increment here. + */ + smp_mb(); + mnt_add_count(mnt, -1); if (mnt_get_count(mnt)) { rcu_read_unlock(); unlock_mount_hash(); @@ -1708,6 +1736,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) struct mount *mnt; int retval; int lookup_flags = 0; + bool user_request = !(current->flags & PF_KTHREAD); if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; @@ -1733,11 +1762,35 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) goto dput_and_out; + /* flush delayed_fput to put mnt_count */ + if (user_request) + flush_delayed_fput_wait(); + retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); + + if (!user_request) + goto out; + + if (!retval) { + /* + * If the last delayed_fput() is called during do_umount() + * and makes mnt_count zero, we need to guarantee to register + * delayed_mntput by waiting for delayed_fput work again. 
+ */ + flush_delayed_fput_wait(); + + /* flush delayed_mntput_work to put sb->s_active */ + flush_delayed_mntput_wait(); + } + if (!retval || (flags & MNT_FORCE)) { + /* filesystem needs to handle unclosed namespaces */ + if (mnt->mnt.mnt_sb->s_op->umount_end) + mnt->mnt.mnt_sb->s_op->umount_end(mnt->mnt.mnt_sb, flags); + } out: return retval; } diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c index d5701dd9bfd4fd0dfcb528ca47f8e71e219c2677..1ad7718c05d99b2c566975464e54412118b3496a 100644 --- a/fs/sdcardfs/main.c +++ b/fs/sdcardfs/main.c @@ -295,6 +295,13 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb, atomic_inc(&lower_sb->s_active); sdcardfs_set_lower_super(sb, lower_sb); + sb->s_stack_depth = lower_sb->s_stack_depth + 1; + if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { + pr_err("sdcardfs: maximum fs stacking depth exceeded\n"); + err = -EINVAL; + goto out_sput; + } + /* inherit maxbytes from lower file system */ sb->s_maxbytes = lower_sb->s_maxbytes; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 2142bceaeb75919028700e5ffad6412576c07d51..f00421dfacbd06bf6dba7f9f7d7783f0518135dd 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -991,8 +991,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); -int pud_free_pmd_page(pud_t *pud); -int pmd_free_pte_page(pmd_t *pmd); +int pud_free_pmd_page(pud_t *pud, unsigned long addr); +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { @@ -1018,11 +1018,11 @@ static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } -static inline int pud_free_pmd_page(pud_t *pud) +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return 0; } -static inline int 
pmd_free_pte_page(pmd_t *pmd) +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return 0; } @@ -1055,6 +1055,18 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, static inline void init_espfix_bsp(void) { } #endif +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + return true; +} + +static inline bool arch_has_pfn_modify_check(void) +{ + return false; +} +#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h deleted file mode 100644 index 6b700c7b2fe1cba3cd054a200eac450ac4bde7be..0000000000000000000000000000000000000000 --- a/include/crypto/vmac.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Modified to interface to the Linux kernel - * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - */ - -#ifndef __CRYPTO_VMAC_H -#define __CRYPTO_VMAC_H - -/* -------------------------------------------------------------------------- - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Please send bug reports to the authors. 
- * Last modified: 17 APR 08, 1700 PDT - * ----------------------------------------------------------------------- */ - -/* - * User definable settings. - */ -#define VMAC_TAG_LEN 64 -#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ -#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) -#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ - -/* - * This implementation uses u32 and u64 as names for unsigned 32- - * and 64-bit integer types. These are defined in C99 stdint.h. The - * following may need adaptation if you are not running a C99 or - * Microsoft C environment. - */ -struct vmac_ctx { - u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; - u64 polykey[2*VMAC_TAG_LEN/64]; - u64 l3key[2*VMAC_TAG_LEN/64]; - u64 polytmp[2*VMAC_TAG_LEN/64]; - u64 cached_nonce[2]; - u64 cached_aes[2]; - int first_block_processed; -}; - -typedef u64 vmac_t; - -struct vmac_ctx_t { - struct crypto_cipher *child; - struct vmac_ctx __vmac_ctx; - u8 partial[VMAC_NHBYTES]; /* partial block */ - int partial_size; /* size of the partial block */ -}; - -#endif /* __CRYPTO_VMAC_H */ diff --git a/include/dt-bindings/clock/mdss-28nm-pll-clk.h b/include/dt-bindings/clock/mdss-28nm-pll-clk.h index 45e12ca5f923271600b5006c2d493d850ecfefca..e81c7309f42e4e02c777b427af028dedafc87e96 100644 --- a/include/dt-bindings/clock/mdss-28nm-pll-clk.h +++ b/include/dt-bindings/clock/mdss-28nm-pll-clk.h @@ -28,4 +28,12 @@ #define BYTECLK_SRC_1_CLK 10 #define PCLK_SRC_1_CLK 11 +/* HDMI PLL clocks */ +#define HDMI_VCO_CLK 0 +#define HDMI_VCO_DIVIDED_1_CLK_SRC 1 +#define HDMI_VCO_DIVIDED_TWO_CLK_SRC 2 +#define HDMI_VCO_DIVIDED_FOUR_CLK_SRC 3 +#define HDMI_VCO_DIVIDED_SIX_CLK_SRC 4 +#define HDMI_PCLK_SRC_MUX 5 +#define HDMI_PCLK_SRC 6 #endif diff --git a/include/dt-bindings/clock/qcom,cpucc-sm8150.h b/include/dt-bindings/clock/qcom,cpucc-sm8150.h index 500db1fc7b2ee348faa8a6361d2fac1134cc2aae..93b544a85d9d6cad2019904e05d2feac39253622 100644 --- a/include/dt-bindings/clock/qcom,cpucc-sm8150.h +++ 
b/include/dt-bindings/clock/qcom,cpucc-sm8150.h @@ -16,22 +16,23 @@ #define L3_CLUSTER0_VOTE_CLK 0 #define L3_CLUSTER1_VOTE_CLK 1 -#define L3_MISC_VOTE_CLK 2 -#define L3_CLK 3 -#define CPU0_PWRCL_CLK 4 -#define CPU1_PWRCL_CLK 5 -#define CPU2_PWRCL_CLK 6 -#define CPU3_PWRCL_CLK 7 -#define PWRCL_CLK 8 -#define CPU4_PERFCL_CLK 9 -#define CPU5_PERFCL_CLK 10 -#define CPU6_PERFCL_CLK 11 -#define PERFCL_CLK 12 -#define CPU7_PERFPCL_CLK 13 -#define PERFPCL_CLK 14 -#define CPU7_PERFCL_CLK 15 -#define L3_GPU_VOTE_CLK 16 -#define CPU4_PWRCL_CLK 17 -#define CPU5_PWRCL_CLK 18 +#define L3_CLUSTER2_VOTE_CLK 2 +#define L3_MISC_VOTE_CLK 3 +#define L3_GPU_VOTE_CLK 4 +#define L3_CLK 5 +#define CPU0_PWRCL_CLK 6 +#define CPU1_PWRCL_CLK 7 +#define CPU2_PWRCL_CLK 8 +#define CPU3_PWRCL_CLK 9 +#define CPU4_PWRCL_CLK 10 +#define CPU5_PWRCL_CLK 11 +#define PWRCL_CLK 12 +#define CPU4_PERFCL_CLK 13 +#define CPU5_PERFCL_CLK 14 +#define CPU6_PERFCL_CLK 15 +#define CPU7_PERFCL_CLK 16 +#define PERFCL_CLK 17 +#define CPU7_PERFPCL_CLK 18 +#define PERFPCL_CLK 19 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6150.h b/include/dt-bindings/clock/qcom,gcc-sm6150.h index a6c5929e1ceb73c2da61a6ea475336f04f37cae5..57a79d39c15d4d60795aa737cd8823f950e84b93 100644 --- a/include/dt-bindings/clock/qcom,gcc-sm6150.h +++ b/include/dt-bindings/clock/qcom,gcc-sm6150.h @@ -190,6 +190,7 @@ #define GCC_RX1_USB2_CLKREF_CLK 170 #define GCC_USB2_PRIM_CLKREF_CLK 171 #define GCC_USB2_SEC_CLKREF_CLK 172 +#define GCC_RX3_USB2_CLKREF_CLK 173 /* GCC Resets */ #define GCC_QUSB2PHY_PRIM_BCR 0 diff --git a/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h b/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h index d0df54ca4099c8958b915cc86112dfaa00577d71..a119a7a720928f7eaa2748d054014b225f42c417 100644 --- a/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h +++ b/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h @@ -16,23 +16,25 @@ #define GPU_CC_PLL0 0 #define GPU_CC_PLL0_OUT_EVEN 1 -#define GPU_CC_ACD_AHB_CLK 2 -#define 
GPU_CC_ACD_CXO_CLK 3 -#define GPU_CC_AHB_CLK 4 -#define GPU_CC_CRC_AHB_CLK 5 -#define GPU_CC_CX_APB_CLK 6 -#define GPU_CC_CX_GFX3D_CLK 7 -#define GPU_CC_CX_GFX3D_SLV_CLK 8 -#define GPU_CC_CX_GMU_CLK 9 -#define GPU_CC_CX_SNOC_DVM_CLK 10 -#define GPU_CC_CXO_AON_CLK 11 -#define GPU_CC_CXO_CLK 12 -#define GPU_CC_GMU_CLK_SRC 13 -#define GPU_CC_GX_CXO_CLK 14 -#define GPU_CC_GX_GFX3D_CLK 15 -#define GPU_CC_GX_GFX3D_CLK_SRC 16 -#define GPU_CC_GX_GMU_CLK 17 -#define GPU_CC_GX_VSENSE_CLK 18 -#define GPU_CC_SLEEP_CLK 19 +#define GPU_CC_PLL1 2 +#define GPU_CC_PLL1_OUT_EVEN 3 +#define GPU_CC_ACD_AHB_CLK 4 +#define GPU_CC_ACD_CXO_CLK 5 +#define GPU_CC_AHB_CLK 6 +#define GPU_CC_CRC_AHB_CLK 7 +#define GPU_CC_CX_APB_CLK 8 +#define GPU_CC_CX_GFX3D_CLK 9 +#define GPU_CC_CX_GFX3D_SLV_CLK 10 +#define GPU_CC_CX_GMU_CLK 11 +#define GPU_CC_CX_SNOC_DVM_CLK 12 +#define GPU_CC_CXO_AON_CLK 13 +#define GPU_CC_CXO_CLK 14 +#define GPU_CC_GMU_CLK_SRC 15 +#define GPU_CC_GX_CXO_CLK 16 +#define GPU_CC_GX_GFX3D_CLK 17 +#define GPU_CC_GX_GFX3D_CLK_SRC 18 +#define GPU_CC_GX_GMU_CLK 19 +#define GPU_CC_GX_VSENSE_CLK 20 +#define GPU_CC_SLEEP_CLK 21 #endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6150.h b/include/dt-bindings/clock/qcom,gpucc-sm6150.h index 405f6dd09e69fbf69b9ae0849a53c05cad93a660..9f7f7edb155b0479e444d70bac555d861702de59 100644 --- a/include/dt-bindings/clock/qcom,gpucc-sm6150.h +++ b/include/dt-bindings/clock/qcom,gpucc-sm6150.h @@ -14,23 +14,27 @@ #ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6150_H #define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6150_H +/* Hardware clocks */ +#define CRC_DIV_PLL0_OUT_AUX2 0 +#define CRC_DIV_PLL1_OUT_AUX2 1 + /* GPUCC clock registers */ -#define GPU_CC_PLL0_OUT_AUX2 0 -#define GPU_CC_PLL1_OUT_AUX2 1 -#define GPU_CC_CRC_AHB_CLK 2 -#define GPU_CC_CX_APB_CLK 3 -#define GPU_CC_CX_GFX3D_CLK 4 -#define GPU_CC_CX_GFX3D_SLV_CLK 5 -#define GPU_CC_CX_GMU_CLK 6 -#define GPU_CC_CX_SNOC_DVM_CLK 7 -#define GPU_CC_CXO_AON_CLK 8 -#define GPU_CC_CXO_CLK 9 -#define 
GPU_CC_GMU_CLK_SRC 10 -#define GPU_CC_SLEEP_CLK 11 -#define GPU_CC_GX_GMU_CLK 12 -#define GPU_CC_GX_CXO_CLK 13 -#define GPU_CC_GX_GFX3D_CLK 14 -#define GPU_CC_GX_GFX3D_CLK_SRC 15 -#define GPU_CC_AHB_CLK 16 +#define GPU_CC_PLL0_OUT_AUX2 2 +#define GPU_CC_PLL1_OUT_AUX2 3 +#define GPU_CC_CRC_AHB_CLK 4 +#define GPU_CC_CX_APB_CLK 5 +#define GPU_CC_CX_GFX3D_CLK 6 +#define GPU_CC_CX_GFX3D_SLV_CLK 7 +#define GPU_CC_CX_GMU_CLK 8 +#define GPU_CC_CX_SNOC_DVM_CLK 9 +#define GPU_CC_CXO_AON_CLK 10 +#define GPU_CC_CXO_CLK 11 +#define GPU_CC_GMU_CLK_SRC 12 +#define GPU_CC_SLEEP_CLK 13 +#define GPU_CC_GX_GMU_CLK 14 +#define GPU_CC_GX_CXO_CLK 15 +#define GPU_CC_GX_GFX3D_CLK 16 +#define GPU_CC_GX_GFX3D_CLK_SRC 17 +#define GPU_CC_AHB_CLK 18 #endif diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h index f620407aa80c23cfe66016ac357ca45f18954913..3972c0167060f49bd3998f1a5941b8b0b99dbdbd 100644 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ b/include/dt-bindings/msm/msm-bus-ids.h @@ -96,6 +96,13 @@ #define MSM_BUS_BCM_CO0 7041 #define MSM_BUS_BCM_CO1 7042 #define MSM_BUS_BCM_CO2 7043 +#define MSM_BUS_BCM_QP0 7043 +#define MSM_BUS_BCM_PN0 7044 +#define MSM_BUS_BCM_PN1 7045 +#define MSM_BUS_BCM_PN2 7046 +#define MSM_BUS_BCM_PN3 7047 +#define MSM_BUS_BCM_PN4 7048 +#define MSM_BUS_BCM_PN5 7049 #define MSM_BUS_RSC_APPS 8000 #define MSM_BUS_RSC_DISP 8001 @@ -282,6 +289,11 @@ #define MSM_BUS_MASTER_SENSORS_AHB 170 #define MSM_BUS_MASTER_CAMNOC_NRT 171 #define MSM_BUS_MASTER_CAMNOC_RT 172 +#define MSM_BUS_MASTER_SPMI_FETCHER 173 +#define MSM_BUS_MASTER_ANOC_SNOC 174 +#define MSM_BUS_MASTER_ANOC_IPA 175 +#define MSM_BUS_MASTER_IPA_PCIE 176 +#define MSM_BUS_MASTER_MEM_NOC_PCIE_SNOC 177 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001 @@ -661,6 +673,10 @@ #define MSM_BUS_SLAVE_CAMERA_NRT_THROTTLE_CFG 808 #define MSM_BUS_SLAVE_CAMERA_RT_THROTTLE_CFG 809 #define MSM_BUS_SLAVE_VENUS_CVP_THROTTLE_CFG 810 +#define 
MSM_BUS_SLAVE_ANOC_SNOC 811 +#define MSM_BUS_SLAVE_ANOC_IPA 812 +#define MSM_BUS_SLAVE_ECC_CFG 813 +#define MSM_BUS_SLAVE_SPMI_VGI_COEX 814 #define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512 #define MSM_BUS_SLAVE_LLCC_DISPLAY 20513 diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 30f317099366a47e1e092cf2fbe9fb39325689df..4ca9dda8db2494ad234c4df53274ca9ccc013d3e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -245,6 +245,13 @@ enum req_flag_bits { __REQ_URGENT, /* urgent request */ __REQ_NOWAIT, /* Don't wait if request will block */ + + /* Android specific flags */ + __REQ_NOENCRYPT, /* + * ok to not encrypt (already encrypted at fs + * level) + */ + __REQ_NR_BITS, /* stops here */ }; @@ -262,6 +269,7 @@ enum req_flag_bits { #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) +#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c3a488f8b2e149d3d8fa42767824f4db8a29762d..2d228d9e6a617cf0592ca57828fa1a3d26ea745e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -154,6 +154,7 @@ struct request { unsigned int __data_len; /* total data len */ int tag; sector_t __sector; /* sector cursor */ + u64 __dun; /* dun for UFS */ struct bio *bio; struct bio *biotail; @@ -1034,6 +1035,11 @@ static inline sector_t blk_rq_pos(const struct request *rq) return rq->__sector; } +static inline sector_t blk_rq_dun(const struct request *rq) +{ + return rq->__dun; +} + static inline unsigned int blk_rq_bytes(const struct request *rq) { return rq->__data_len; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 2c586c0a87cb60a283a21f5fdf834eeec0228e83..a96d8a9f2769c304433bb60fe8dbb20b3260c25c 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ 
-36,12 +36,14 @@ #define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ /* parents need enable during gate/ungate, set rate and re-parent */ #define CLK_OPS_PARENT_ENABLE BIT(12) -#define CLK_ENABLE_HAND_OFF BIT(13) /* enable clock when registered. */ +/* duty cycle call may be forwarded to the parent clock */ +#define CLK_DUTY_CYCLE_PARENT BIT(13) +#define CLK_ENABLE_HAND_OFF BIT(14) /* enable clock when registered. */ /* * hand-off enable_count & prepare_count * to first consumer that enables clk */ -#define CLK_IS_MEASURE BIT(14) /* measure clock */ +#define CLK_IS_MEASURE BIT(15) /* measure clock */ struct clk; struct clk_hw; @@ -70,6 +72,17 @@ struct clk_rate_request { struct clk_hw *best_parent_hw; }; +/** + * struct clk_duty - Struture encoding the duty cycle ratio of a clock + * + * @num: Numerator of the duty cycle ratio + * @den: Denominator of the duty cycle ratio + */ +struct clk_duty { + unsigned int num; + unsigned int den; +}; + /** * struct clk_ops - Callback operations for hardware clocks; these are to * be provided by the clock implementation, and will be called by drivers @@ -173,6 +186,15 @@ struct clk_rate_request { * by the second argument. Valid values for degrees are * 0-359. Return 0 on success, otherwise -EERROR. * + * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio + * of a clock. Returned values denominator cannot be 0 and must be + * superior or equal to the numerator. + * + * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by + * the numerator (2nd argurment) and denominator (3rd argument). + * Argument must be a valid ratio (denominator > 0 + * and >= numerator) Return 0 on success, otherwise -EERROR. + * * @init: Perform platform-specific initialization magic. * This is not not used by any of the basic clock types. * Please consider other ways of solving initialization problems @@ -194,6 +216,9 @@ struct clk_rate_request { * clock that is below rate_max. 
Return -ENXIO in case there is * no frequency table. * + * @bus_vote: Votes for bandwidth on certain config slaves to connect + * ports in order to gain access to clock controllers. + * * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow * implementations to split any work between atomic (enable) and sleepable * (prepare) contexts. If enabling a clock requires code that might sleep, @@ -231,6 +256,10 @@ struct clk_ops { unsigned long parent_accuracy); int (*get_phase)(struct clk_hw *hw); int (*set_phase)(struct clk_hw *hw, int degrees); + int (*get_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); + int (*set_duty_cycle)(struct clk_hw *hw, + struct clk_duty *duty); void (*init)(struct clk_hw *hw); int (*debug_init)(struct clk_hw *hw, struct dentry *dentry); int (*set_flags)(struct clk_hw *hw, unsigned int flags); @@ -238,6 +267,7 @@ struct clk_ops { struct clk_hw *hw); long (*list_rate)(struct clk_hw *hw, unsigned int n, unsigned long rate_max); + void (*bus_vote)(struct clk_hw *hw, bool enable); }; /** @@ -252,6 +282,7 @@ struct clk_ops { * @vdd_class: voltage scaling requirement class * @rate_max: maximum clock rate in Hz supported at each voltage level * @num_rate_max: number of maximum voltage level supported + * @bus_cl_id: client id registered with the bus driver used for bw votes */ struct clk_init_data { const char *name; @@ -262,6 +293,7 @@ struct clk_init_data { struct clk_vdd_class *vdd_class; unsigned long *rate_max; int num_rate_max; + unsigned int bus_cl_id; }; struct regulator; diff --git a/include/linux/clk.h b/include/linux/clk.h index 3d7ad23e0741ac3e5238db87495538a9fcd7b90c..7f4aab9b987280af81c89e55f75ba0fb4ac18295 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -141,6 +141,27 @@ int clk_set_phase(struct clk *clk, int degrees); */ int clk_get_phase(struct clk *clk); +/** + * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal + * @clk: clock signal source + * @num: numerator of the duty cycle 
ratio to be applied + * @den: denominator of the duty cycle ratio to be applied + * + * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on + * success, -EERROR otherwise. + */ +int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); + +/** + * clk_get_duty_cycle - return the duty cycle ratio of a clock signal + * @clk: clock signal source + * @scale: scaling factor to be applied to represent the ratio as an integer + * + * Returns the duty cycle ratio multiplied by the scale provided, otherwise + * returns -EERROR. + */ +int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); + /** * clk_is_match - check if two clk's point to the same hardware clock * @p: clk compared against q @@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk) return -ENOTSUPP; } +static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num, + unsigned int den) +{ + return -ENOTSUPP; +} + +static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk, + unsigned int scale) +{ + return 0; +} + static inline bool clk_is_match(const struct clk *p, const struct clk *q) { return p == q; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 9f32244860693050db81354bb52978864b78fa44..edcb1e452d21796cbe0c4f4819db7221efef6d66 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,7 +30,7 @@ struct cpu { }; extern void boot_cpu_init(void); -extern void boot_cpu_state_init(void); +extern void boot_cpu_hotplug_init(void); extern void cpu_init(void); extern void trap_init(void); @@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, @@ -183,4 +185,23 
@@ void idle_notifier_register(struct notifier_block *n); void idle_notifier_unregister(struct notifier_block *n); void idle_notifier_call_chain(unsigned long val); +enum cpuhp_smt_control { + CPU_SMT_ENABLED, + CPU_SMT_DISABLED, + CPU_SMT_FORCE_DISABLED, + CPU_SMT_NOT_SUPPORTED, +}; + +#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) +extern enum cpuhp_smt_control cpu_smt_control; +extern void cpu_smt_disable(bool force); +extern void cpu_smt_check_topology_early(void); +extern void cpu_smt_check_topology(void); +#else +# define cpu_smt_control (CPU_SMT_ENABLED) +static inline void cpu_smt_disable(bool force) { } +static inline void cpu_smt_check_topology_early(void) { } +static inline void cpu_smt_check_topology(void) { } +#endif + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ad33507b34be206abd45495f741a28c1c0883c35..113a14833ad3f8379b5ad7863183e72cf8d3d4aa 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -131,7 +131,8 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); @@ -163,7 +164,7 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *stop_tick) {return -ENODEV; } static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) @@ -246,7 +247,8 @@ struct cpuidle_governor { struct cpuidle_device *dev); int (*select) (struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); void 
(*reflect) (struct cpuidle_device *dev, int index); }; diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index d534b345dfd4472dfe58be7fe91796509d20a734..5d1f46be7a4ed1b018c0fb8e59a61c93a6b7a872 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -147,10 +147,10 @@ * a new RANGE of SSIDs to the msg_mask_tbl. */ #define MSG_MASK_TBL_CNT 26 -#define APPS_EVENT_LAST_ID 0xC7A +#define APPS_EVENT_LAST_ID 0xC85 #define MSG_SSID_0 0 -#define MSG_SSID_0_LAST 125 +#define MSG_SSID_0_LAST 129 #define MSG_SSID_1 500 #define MSG_SSID_1_LAST 506 #define MSG_SSID_2 1000 @@ -166,7 +166,7 @@ #define MSG_SSID_7 4600 #define MSG_SSID_7_LAST 4616 #define MSG_SSID_8 5000 -#define MSG_SSID_8_LAST 5034 +#define MSG_SSID_8_LAST 5036 #define MSG_SSID_9 5500 #define MSG_SSID_9_LAST 5516 #define MSG_SSID_10 6000 @@ -220,7 +220,7 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_MED, MSG_LVL_HIGH, MSG_LVL_HIGH, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8, MSG_LVL_LOW, MSG_LVL_ERROR, MSG_LVL_LOW, @@ -354,6 +354,10 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_HIGH, MSG_LVL_LOW, MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL, + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_MED, + MSG_LVL_MED, MSG_LVL_HIGH }; @@ -425,81 +429,91 @@ static const uint32_t msg_bld_masks_5[] = { }; static const uint32_t msg_bld_masks_6[] = { - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, 
- MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW, - MSG_LVL_LOW + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | 
MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, + MSG_LVL_LOW | MSG_MASK_5, }; static const uint32_t msg_bld_masks_7[] = { @@ -557,7 +571,9 @@ static const uint32_t msg_bld_masks_8[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, - MSG_LVL_HIGH, + MSG_LVL_MED, + MSG_LVL_MED, + MSG_LVL_MED, MSG_LVL_HIGH }; @@ -786,19 +802,22 @@ static const uint32_t msg_bld_masks_17[] = { static const uint32_t msg_bld_masks_18[] = { MSG_LVL_LOW, - MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | + MSG_LVL_MED | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6, - MSG_LVL_LOW | MSG_MASK_5, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6, + MSG_LVL_MED | MSG_MASK_5, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6, MSG_LVL_LOW, - MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9, MSG_LVL_LOW, MSG_LVL_LOW, - 
MSG_LVL_LOW, + MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | + MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 | + MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 | + MSG_MASK_20 | MSG_MASK_21 | MSG_MASK_22, MSG_LVL_LOW }; @@ -897,7 +916,7 @@ static const uint32_t msg_bld_masks_25[] = { /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ - 0x1C6A, /* EQUIP ID 1 */ + 0x1C7B, /* EQUIP ID 1 */ 0x0, /* EQUIP ID 2 */ 0x0, /* EQUIP ID 3 */ 0x4910, /* EQUIP ID 4 */ diff --git a/include/linux/dma-buf-ref.h b/include/linux/dma-buf-ref.h new file mode 100644 index 0000000000000000000000000000000000000000..5dffad9d5676f1a04ae771d70f9798746131b93a --- /dev/null +++ b/include/linux/dma-buf-ref.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DMA_BUF_REF_H +#define _DMA_BUF_REF_H + +struct dma_buf; +struct seq_file; + +#ifdef CONFIG_DEBUG_DMA_BUF_REF +void dma_buf_ref_init(struct dma_buf *b); +void dma_buf_ref_destroy(struct dma_buf *b); +void dma_buf_ref_mod(struct dma_buf *b, int nr); +int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf); + +#else +static inline void dma_buf_ref_init(struct dma_buf *b) {} +static inline void dma_buf_ref_destroy(struct dma_buf *b) {} +static inline void dma_buf_ref_mod(struct dma_buf *b, int nr) {} +static inline int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf) +{ + return -ENOMEM; +} +#endif + + +#endif /* _DMA_BUF_REF_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 93a0e03aa9cff86329e333c907141a347f81a999..e9dec43ad6cdb3ed3339d9740b2438fa3d89f50e 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -31,6 +31,7 @@ #include #include #include +#include #include struct device; @@ -383,6 +384,7 @@ struct dma_buf_ops { * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. * @name: unique name for the buffer + * @ktime: time (in jiffies) at which the buffer was born * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. 
@@ -411,7 +413,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; char *name; - struct timespec ctime; + ktime_t ktime; struct module *owner; struct list_head list_node; void *priv; @@ -426,6 +428,8 @@ struct dma_buf { unsigned long active; } cb_excl, cb_shared; + + struct list_head refs; }; /** @@ -498,6 +502,7 @@ struct dma_buf_export_info { static inline void get_dma_buf(struct dma_buf *dmabuf) { get_file(dmabuf->file); + dma_buf_ref_mod(dmabuf, 1); } struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 4bdd9dd873330aac49d976b06215d861932886af..5d4c71a766981ba1c981d8ed65f82dcde6b2017b 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -124,14 +124,19 @@ * @type: integer (intval) * @value: 0 (USB/USB2) or 1 (USB3) * @default: 0 (USB/USB2) + * - EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT + * @type: integer (intval) + * @value: 0 (default current), 1 (medium or high current) + * @default: 0 (default current) * */ #define EXTCON_PROP_USB_VBUS 0 #define EXTCON_PROP_USB_TYPEC_POLARITY 1 #define EXTCON_PROP_USB_SS 2 +#define EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT 3 #define EXTCON_PROP_USB_MIN 0 -#define EXTCON_PROP_USB_MAX 2 +#define EXTCON_PROP_USB_MAX 3 #define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1) /* Properties of EXTCON_TYPE_CHG. 
*/ diff --git a/include/linux/file.h b/include/linux/file.h index 279720db984af394c90737ed31b8ec09ef73c2ca..da3eb2b5af8bdc7d9e3acdd0eb3eec8b7c683898 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -85,6 +85,7 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); extern void flush_delayed_fput(void); +extern void flush_delayed_fput_wait(void); extern void __fput_sync(struct file *); #endif /* __LINUX_FILE_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index b030db4098ee3328ea139c0e6f05a10b5df70066..84ed6fd29a246ffccd186e6eeead2697e63d05b6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1831,6 +1831,7 @@ struct super_operations { void *(*clone_mnt_data) (void *); void (*copy_mnt_data) (void *, void *); void (*umount_begin) (struct super_block *); + void (*umount_end) (struct super_block *, int); int (*show_options)(struct seq_file *, struct dentry *); int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 129b948bee5a08ec8a341a777eddfaefb3462e67..ddd5b3a3df20c5467e2757f91259128ad546e896 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -20,12 +20,9 @@ struct fscrypt_ctx; -/* iv sector for security/pfe/pfk_fscrypt.c and f2fs. sizeof is required - * to accommodate 32 bit targets. 
- */ +/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */ #define PG_DUN(i, p) \ - ((((i)->i_ino & 0xffffffff) << (sizeof((i)->i_ino)/2)) | \ - ((p)->index & 0xffffffff)) + (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff)) struct fscrypt_info; diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 68862592d8754ac5903ea23a2e9812f2365a02d0..035ac3589f047279f1008017fae13f6bba741383 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -199,7 +199,7 @@ static inline void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip) } static inline bool fscrypt_mergeable_bio(struct bio *bio, - sector_t iv_block, bool bio_encrypted, int bi_crypt_skip) + u64 dun, bool bio_encrypted, int bi_crypt_skip) { return true; } diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h index f9ce85c0fe5f70f683a8fb3a43c72db00ff71e21..af36a6815518daf0144b9c57b976abf4b3bb24e0 100644 --- a/include/linux/hdcp_qseecom.h +++ b/include/linux/hdcp_qseecom.h @@ -69,6 +69,9 @@ void hdcp2_deinit(void *ctx); bool hdcp2_feature_supported(void *ctx); int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, struct hdcp2_app_data *app_data); +int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, + uint8_t stream_number, uint32_t *stream_id); +int hdcp2_close_stream(void *ctx, uint32_t stream_id); int hdcp2_force_encryption(void *ctx, uint32_t enable); #else static inline void *hdcp1_init(void) @@ -119,6 +122,17 @@ static inline int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, return 0; } +static inline int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, + uint8_t stream_number, uint32_t *stream_id) +{ + return 0; +} + +static inline int hdcp2_close_stream(void *ctx, uint32_t stream_id) +{ + return 0; +} + static inline int hdcp2_force_encryption(void *ctx, uint32_t enable) { return 0; diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 
dd293a8f765e84b640e23d0c7d6a51218ec9a757..ceb4ac2365194fff4e5b02ae7eb854439bca8ccb 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -411,6 +411,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) } extern u64 hrtimer_get_next_event(void); +extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); diff --git a/include/linux/ipa.h b/include/linux/ipa.h index 05c14618e171677314a6e7eb97bacd7113e1044f..d9bf59ff6bd5cfb2499865cf41baab33fe374fc3 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -100,8 +100,6 @@ enum ipa_aggr_mode { enum ipa_dp_evt_type { IPA_RECEIVE, IPA_WRITE_DONE, - IPA_CLIENT_START_POLL, - IPA_CLIENT_COMP_NAPI, }; /** @@ -608,7 +606,7 @@ struct ipa_sys_connect_params { ipa_notify_cb notify; bool skip_ep_cfg; bool keep_ipa_awake; - bool napi_enabled; + struct napi_struct *napi_obj; bool recycle_enabled; }; diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 9385aa57497b80851cbe7bfd6adc83c60f03034c..a27cf66523279c1a5d4aaa0d0087f1e9d48d170f 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -62,8 +62,11 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) -/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ -#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ +#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) + +/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) #ifndef __jiffy_arch_data #define __jiffy_arch_data diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 4952f2ecf8679400226f46f61238c7e8aba7c0da..b257f6031d7ef2b89e32fd8f358e15686d7ef205 100644 --- a/include/linux/mmc/card.h 
+++ b/include/linux/mmc/card.h @@ -433,12 +433,12 @@ static inline bool mmc_card_support_auto_bkops(const struct mmc_card *c) static inline bool mmc_card_configured_manual_bkops(const struct mmc_card *c) { - return c->ext_csd.man_bkops_en & EXT_CSD_BKOPS_MANUAL_EN; + return c->ext_csd.man_bkops_en; } static inline bool mmc_card_configured_auto_bkops(const struct mmc_card *c) { - return c->ext_csd.auto_bkops_en & EXT_CSD_BKOPS_AUTO_EN; + return c->ext_csd.auto_bkops_en; } static inline bool mmc_enable_qca6574_settings(const struct mmc_card *c) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0cf555098c8234447b5a9cbaa147472ba924a517..0420ea6654b4f4724a2ea68ee1a4bd68cc7a490c 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -391,9 +391,9 @@ enum dev_state { * @upthreshold: up-threshold supplied to ondemand governor * @downthreshold: down-threshold supplied to ondemand governor * @need_freq_change: flag indicating if a frequency change is required - * @clk_scaling_in_progress: flag indicating if there's ongoing frequency change * @is_busy_started: flag indicating if a request is handled by the HW * @enable: flag indicating if the clock scaling logic is enabled for this host + * @is_suspended: to make devfreq request queued when mmc is suspened */ struct mmc_devfeq_clk_scaling { spinlock_t lock; @@ -418,9 +418,9 @@ struct mmc_devfeq_clk_scaling { unsigned int lower_bus_speed_mode; #define MMC_SCALING_LOWER_DDR52_MODE 1 bool need_freq_change; - bool clk_scaling_in_progress; bool is_busy_started; bool enable; + bool is_suspended; }; struct mmc_host { @@ -705,9 +705,12 @@ struct mmc_host { void *cmdq_private; struct mmc_request *err_mrq; + bool inlinecrypt_support; /* Inline encryption support */ + atomic_t rpmb_req_pending; struct mutex rpmb_req_mutex; unsigned long private[0] ____cacheline_aligned; + bool crash_on_err; /* crash the system on error */ }; struct device_node; diff --git a/include/linux/msm_gsi.h 
b/include/linux/msm_gsi.h index fa57290fd530cc24bd50d1fc7d5d7550e59f8d65..e4991ad7576693eabcd11aa485a8cd969727c2b1 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -39,6 +39,7 @@ enum gsi_status { GSI_STATUS_EVT_RING_INCOMPATIBLE = 10, GSI_STATUS_TIMED_OUT = 11, GSI_STATUS_AGAIN = 12, + GSI_STATUS_PENDING_IRQ = 13, }; enum gsi_per_evt { diff --git a/include/linux/msm_mhi_dev.h b/include/linux/msm_mhi_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..b96591b27f6a4677e3cadd71eb7ae5fdabc7ae6d --- /dev/null +++ b/include/linux/msm_mhi_dev.h @@ -0,0 +1,259 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_MHI_DEV_H +#define __MSM_MHI_DEV_H + +#include +#include + +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +enum cb_reason { + MHI_DEV_TRE_AVAILABLE = 0, + MHI_DEV_CTRL_UPDATE, +}; + +struct mhi_dev_client_cb_reason { + uint32_t ch_id; + enum cb_reason reason; +}; + +struct mhi_dev_client { + struct list_head list; + struct mhi_dev_channel *channel; + void (*event_trigger)(struct mhi_dev_client_cb_reason *cb); + + /* mhi_dev calls are fully synchronous -- only one call may be + * active per client at a time for now. 
+ */ + struct mutex write_lock; + wait_queue_head_t wait; + + /* trace logs */ + spinlock_t tr_lock; + unsigned int tr_head; + unsigned int tr_tail; + struct mhi_dev_trace *tr_log; + + /* client buffers */ + struct mhi_dev_iov *iov; + uint32_t nr_iov; +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +struct mhi_req { + u32 chan; + u32 mode; + u32 chain; + void *buf; + dma_addr_t dma; + u32 snd_cmpl; + void *context; + size_t len; + size_t actual_len; + uint32_t rd_offset; + struct mhi_dev_client *client; + struct list_head list; + union mhi_dev_ring_element_type *el; + void (*client_cb)(void *req); +}; + +/* SW channel client list */ +enum mhi_client_channel { + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_IP_CTRL_0_OUT = 16, + MHI_CLIENT_IP_CTRL_0_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_DCI_OUT = 20, + MHI_CLIENT_DCI_IN = 21, + MHI_CLIENT_IP_CTRL_3_OUT = 22, + MHI_CLIENT_IP_CTRL_3_IN = 23, + MHI_CLIENT_IP_CTRL_4_OUT = 24, + MHI_CLIENT_IP_CTRL_4_IN = 25, + MHI_CLIENT_IP_CTRL_5_OUT = 26, + MHI_CLIENT_IP_CTRL_5_IN = 27, + MHI_CLIENT_IP_CTRL_6_OUT = 28, + MHI_CLIENT_IP_CTRL_6_IN = 29, + MHI_CLIENT_IP_CTRL_7_OUT = 30, + MHI_CLIENT_IP_CTRL_7_IN = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_IP_SW_0_OUT = 34, + MHI_CLIENT_IP_SW_0_IN = 35, + MHI_CLIENT_ADB_OUT = 36, + MHI_CLIENT_ADB_IN = 37, + MHI_CLIENT_IP_SW_2_OUT = 38, + MHI_CLIENT_IP_SW_2_IN = 39, + MHI_CLIENT_IP_SW_3_OUT = 40, + MHI_CLIENT_IP_SW_3_IN = 41, + 
MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_4_OUT = 46, + MHI_CLIENT_IP_SW_4_IN = 47, + MHI_MAX_SOFTWARE_CHANNELS, + MHI_CLIENT_TEST_OUT = 60, + MHI_CLIENT_TEST_IN = 61, + MHI_CLIENT_RESERVED_1_LOWER = 62, + MHI_CLIENT_RESERVED_1_UPPER = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_RESERVED_2_LOWER = 102, + MHI_CLIENT_RESERVED_2_UPPER = 127, + MHI_MAX_CHANNELS = 102, + MHI_CLIENT_INVALID = 0xFFFFFFFF +}; + +struct mhi_dev_client_cb_data { + void *user_data; + enum mhi_client_channel channel; + enum mhi_ctrl_info ctrl_info; +}; + +typedef void (*mhi_state_cb)(struct mhi_dev_client_cb_data *cb_dat); + +struct mhi_dev_ready_cb_info { + struct list_head list; + mhi_state_cb cb; + struct mhi_dev_client_cb_data cb_data; +}; + +#if defined(CONFIG_MSM_MHI_DEV) +/** + * mhi_dev_open_channel() - Channel open for a given client done prior + * to read/write. + * @chan_id: Software Channel ID for the assigned client. + * @handle_client: Structure device for client handle. + * @notifier: Client issued callback notification. + */ +int mhi_dev_open_channel(uint32_t chan_id, + struct mhi_dev_client **handle_client, + void (*event_trigger)(struct mhi_dev_client_cb_reason *cb)); + +/** + * mhi_dev_close_channel() - Channel close for a given client. + */ +int mhi_dev_close_channel(struct mhi_dev_client *handle_client); + +/** + * mhi_dev_read_channel() - Channel read for a given client + * @mreq: mreq is the client argument which includes meta info + * like write data location, buffer len, read offset, mode, + * chain and client call back function which will be invoked + * when data read is completed. + */ +int mhi_dev_read_channel(struct mhi_req *mreq); + +/** + * mhi_dev_write_channel() - Channel write for a given software client. 
+ * @wreq wreq is the client argument which includes meta info like + * client handle, read data location, buffer length, mode, + * and client call back function which will free the packet. + * when data write is completed. + */ +int mhi_dev_write_channel(struct mhi_req *wreq); + +/** + * mhi_dev_channel_isempty() - Checks if there is any pending TRE's to process. + * @handle_client: Client Handle issued during mhi_dev_open_channel + */ +int mhi_dev_channel_isempty(struct mhi_dev_client *handle); + +/** + * mhi_ctrl_state_info() - Provide MHI state info + * @idx: Channel number idx. Look at channel_state_info and + * pass the index for the corresponding channel. + * @info: Return the control info. + * MHI_STATE=CONFIGURED - MHI device is present but not ready + * for data traffic. + * MHI_STATE=CONNECTED - MHI device is ready for data transfer. + * MHI_STATE=DISCONNECTED - MHI device has its pipes suspended. + * exposes device nodes for the supported MHI software + * channels. + */ +int mhi_ctrl_state_info(uint32_t idx, uint32_t *info); + +/** + * mhi_register_state_cb() - Clients can register and receive callback after + * MHI channel is connected or disconnected. 
+ */ +int mhi_register_state_cb(void (*mhi_state_cb) + (struct mhi_dev_client_cb_data *cb_data), void *data, + enum mhi_client_channel channel); + +#else +static inline int mhi_dev_open_channel(uint32_t chan_id, + struct mhi_dev_client **handle_client, + void (*event_trigger)(struct mhi_dev_client_cb_reason *cb)) +{ + return -EINVAL; +}; + +static inline int mhi_dev_close_channel(struct mhi_dev_client *handle_client) +{ + return -EINVAL; +}; + +static inline int mhi_dev_read_channel(struct mhi_req *mreq) +{ + return -EINVAL; +}; + +static inline int mhi_dev_write_channel(struct mhi_req *wreq) +{ + return -EINVAL; +}; + +static inline int mhi_dev_channel_isempty(struct mhi_dev_client *handle) +{ + return -EINVAL; +}; + +static inline int mhi_ctrl_state_info(uint32_t idx, uint32_t *info) +{ + return -EINVAL; +}; + +static inline int mhi_register_state_cb(void (*mhi_state_cb) + (struct mhi_dev_client_cb_data *cb_data), void *data, + enum mhi_client_channel channel) +{ + return -EINVAL; +}; +#endif + +#endif /* _MSM_MHI_DEV_H*/ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 51a9a0af32812dd645ec3e4569f66baf04eed7ce..eb69f32542bac331bca7364b10ce60d83ef2a866 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -239,8 +239,13 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) static inline gfp_t readahead_gfp_mask(struct address_space *x) { - return mapping_gfp_mask(x) | - __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; + gfp_t gfp_mask = mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; + + if (gfp_mask & __GFP_MOVABLE) + gfp_mask |= __GFP_CMA; + + return gfp_mask; } typedef int filler_t(struct file *, struct page *); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 69ca434d09b4eab722ef7fc25d39c7137ed0d57b..8fef6cb696aab348e3111b4c4e20d84b98fd7057 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -307,8 +307,11 @@ enum 
power_supply_property { POWER_SUPPLY_PROP_ESR_ACTUAL, POWER_SUPPLY_PROP_ESR_NOMINAL, POWER_SUPPLY_PROP_SOH, + POWER_SUPPLY_PROP_CLEAR_SOH, POWER_SUPPLY_PROP_FORCE_RECHARGE, POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, + POWER_SUPPLY_PROP_TOGGLE_STAT, + POWER_SUPPLY_PROP_MAIN_FCC_MAX, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e0e5bf8a33df1e550b803eace7ef028a55ab1406..15195c08be5bfff9fc090bc6af35cf7ba006b7ce 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -39,7 +39,7 @@ enum pwm_polarity { * current PWM hardware state. */ struct pwm_args { - unsigned int period; + u64 period; enum pwm_polarity polarity; }; @@ -66,9 +66,9 @@ enum pwm_output_type { * @cycles_per_duty: number of PWM period cycles an entry stays at */ struct pwm_output_pattern { - unsigned int *duty_pattern; + u64 *duty_pattern; unsigned int num_entries; - unsigned int cycles_per_duty; + u64 cycles_per_duty; }; /* @@ -79,8 +79,8 @@ struct pwm_output_pattern { * @enabled: PWM enabled status */ struct pwm_state { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; enum pwm_polarity polarity; enum pwm_output_type output_type; struct pwm_output_pattern *output_pattern; @@ -136,12 +136,30 @@ static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) pwm->state.period = period; } +static inline void pwm_set_period_extend(struct pwm_device *pwm, u64 period) +{ + if (pwm) + pwm->state.period = period; +} + static inline unsigned int pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; pwm_get_state(pwm, &state); + if (state.period > UINT_MAX) + pr_warn("PWM period %llu is truncated\n", state.period); + + return (unsigned int)state.period; +} + +static inline u64 pwm_get_period_extend(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + return state.period; } @@ 
-151,12 +169,30 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } +static inline void pwm_set_duty_cycle_extend(struct pwm_device *pwm, u64 duty) +{ + if (pwm) + pwm->state.duty_cycle = duty; +} + static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; pwm_get_state(pwm, &state); + if (state.duty_cycle > UINT_MAX) + pr_warn("PWM duty cycle %llu is truncated\n", state.duty_cycle); + + return (unsigned int)state.duty_cycle; +} + +static inline u64 pwm_get_duty_cycle_extend(const struct pwm_device *pwm) +{ + struct pwm_state state; + + pwm_get_state(pwm, &state); + return state.duty_cycle; } @@ -288,6 +324,8 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM * @config: configure duty cycles and period length for this PWM + * @config_extend: configure duty cycles and period length for this + * PWM with u64 data type * @set_polarity: configure the polarity of this PWM * @capture: capture and report PWM signal * @enable: enable PWM output toggling @@ -310,6 +348,8 @@ struct pwm_ops { void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns); + int (*config_extend)(struct pwm_chip *chip, struct pwm_device *pwm, + u64 duty_ns, u64 period_ns); int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity); int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, @@ -364,8 +404,8 @@ struct pwm_chip { * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) */ struct pwm_capture { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; }; #if IS_ENABLED(CONFIG_PWM) @@ -418,6 +458,31 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, return pwm_apply_state(pwm, &state); } +/** 
+ * pwm_config_extend() - change PWM period and duty length with u64 data type + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + * + * Returns: 0 on success or a negative error code on failure. + */ +static inline int pwm_config_extend(struct pwm_device *pwm, u64 duty_ns, + u64 period_ns) +{ + struct pwm_state state; + + if (!pwm) + return -EINVAL; + + pwm_get_state(pwm, &state); + if (state.duty_cycle == duty_ns && state.period == period_ns) + return 0; + + state.duty_cycle = duty_ns; + state.period = period_ns; + return pwm_apply_state(pwm, &state); +} + /** * pwm_set_polarity() - configure the polarity of a PWM signal * @pwm: PWM device diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 687ab23d53a909f082af335cbb1ecd519ac24775..5818074cc9a9197bc391f78a0f1515c60d809c01 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1000,6 +1000,7 @@ struct regmap_irq_chip { bool mask_invert:1; bool use_ack:1; bool ack_invert:1; + bool clear_ack:1; bool wake_invert:1; bool runtime_pm:1; bool type_invert:1; diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d149d9d8f608edac5da96a012a08d1888eba1882..1375fd7dcf5a632a95cfa39ae4ace2824951a495 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -41,7 +41,8 @@ extern unsigned int sysctl_sched_boost; extern unsigned int sysctl_sched_group_upmigrate_pct; extern unsigned int sysctl_sched_group_downmigrate_pct; extern unsigned int sysctl_sched_walt_rotate_big_tasks; -extern unsigned int sysctl_sched_min_task_util_for_boost_colocation; +extern unsigned int sysctl_sched_min_task_util_for_boost; +extern unsigned int sysctl_sched_min_task_util_for_colocation; extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz; extern int diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 
bb2d62b3a15691b8e48ecb72823503bc69305c6e..a74ec619ac5107a7a0a3d67a63385a05be256477 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -30,7 +30,7 @@ extern int lockdep_tasklist_lock_is_held(void); #endif /* #ifdef CONFIG_PROVE_RCU */ extern asmlinkage void schedule_tail(struct task_struct *prev); -extern void init_idle(struct task_struct *idle, int cpu, bool cpu_up); +extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); extern void sched_dead(struct task_struct *p); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 66220cc6b45c05d50f4a8d7e745745f7cfc82690..681c2a8b9fda24d901c95e3285c629934ffadb36 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -769,7 +769,11 @@ struct sk_buff { __u8 ipvs_property:1; __u8 inner_protocol_type:1; + __u8 fast_forwarded:1; __u8 remcsum_offload:1; + + /*4 or 6 bit hole */ + #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; #endif diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index 6fb023a2799715d80d6690c922e6ebd8928cad9c..df41a615b9f046afa47fc2e81dc0f244be92a9c0 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -175,7 +175,7 @@ struct qmi_ops { struct qmi_txn { struct qmi_handle *qmi; - int id; + u16 id; struct mutex lock; struct completion completion; diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index d76c0644006f4e307552352279dde5771ce2f549..7d82985d62077436b7a023f2f335a7b69ebe3b55 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -15,5 +15,7 @@ extern int try_to_unuse(unsigned int, bool, unsigned long); extern int swap_ratio(struct swap_info_struct **si, int node); extern void setup_swap_ratio(struct swap_info_struct *p, int prio); extern bool is_swap_ratio_group(int prio); +extern unsigned long generic_max_swapfile_size(void); +extern unsigned long max_swapfile_size(void); #endif /* 
_LINUX_SWAPFILE_H */ diff --git a/include/linux/tick.h b/include/linux/tick.h index 276573c28ab303fa72eb20b3b6f130b8006fb9a8..06554d5cc6f794da1fef04588eb560c2e35247d2 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -114,26 +114,45 @@ enum tick_dep_bits { #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; extern int tick_nohz_tick_stopped(void); +extern void tick_nohz_idle_stop_tick(void); +extern void tick_nohz_idle_retain_tick(void); +extern void tick_nohz_idle_restart_tick(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); -extern ktime_t tick_nohz_get_sleep_length(void); +extern bool tick_nohz_idle_got_tick(void); +extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); + +static inline void tick_nohz_idle_stop_tick_protected(void) +{ + local_irq_disable(); + tick_nohz_idle_stop_tick(); + local_irq_enable(); +} + #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } +static inline void tick_nohz_idle_stop_tick(void) { } +static inline void tick_nohz_idle_retain_tick(void) { } +static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } +static inline bool tick_nohz_idle_got_tick(void) { return false; } -static inline ktime_t tick_nohz_get_sleep_length(void) +static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { - return NSEC_PER_SEC / HZ; + *delta_next = TICK_NSEC; + return *delta_next; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } 
+ +static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h index 4e8417791bea47c498939a81334f043a980b0a4f..8def1431f03e4ba8577f73b9e6abe4a77c18534d 100644 --- a/include/linux/usb/f_mtp.h +++ b/include/linux/usb/f_mtp.h @@ -19,5 +19,35 @@ #define __LINUX_USB_F_MTP_H #include +#include +#ifdef CONFIG_COMPAT +#include +#endif +#ifdef __KERNEL__ + +#ifdef CONFIG_COMPAT +struct __compat_mtp_file_range { + compat_int_t fd; + compat_loff_t offset; + int64_t length; + uint16_t command; + uint32_t transaction_id; +}; + +struct __compat_mtp_event { + compat_size_t length; + compat_caddr_t data; +}; + +#define COMPAT_MTP_SEND_FILE _IOW('M', 0, \ + struct __compat_mtp_file_range) +#define COMPAT_MTP_RECEIVE_FILE _IOW('M', 1, \ + struct __compat_mtp_file_range) +#define COMPAT_MTP_SEND_EVENT _IOW('M', 3, \ + struct __compat_mtp_event) +#define COMPAT_MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, \ + struct __compat_mtp_file_range) +#endif +#endif #endif /* __LINUX_USB_F_MTP_H */ diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f9fb566e75cfd8ca1531dd733d443f9a6c929c62..5fb3f6361090db2ac7f7bbe7a5690accf08b147c 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -62,7 +62,8 @@ struct vsock_sock { struct list_head pending_links; struct list_head accept_queue; bool rejected; - struct delayed_work dwork; + struct delayed_work connect_work; + struct delayed_work pending_work; struct delayed_work close_work; bool close_work_scheduled; u32 peer_shutdown; @@ -75,7 +76,6 @@ struct vsock_sock { s64 vsock_stream_has_data(struct vsock_sock *vsk); s64 vsock_stream_has_space(struct vsock_sock *vsk); -void vsock_pending_work(struct work_struct *work); struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 
8433c14bd8edc95f84edb72c99aa3ab6ae055cf8..71acea013a51a796f46278a324f2258cc981704f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -32,6 +32,15 @@ /* Indicate backport support for external authentication*/ #define CFG80211_EXTERNAL_AUTH_SUPPORT 1 +/* Indicate support for including KEK length in rekey data */ +#define CFG80211_REKEY_DATA_KEK_LEN 1 + +/* Indicate backport support for the new connect done api */ +#define CFG80211_CONNECT_DONE 1 + +/* Indicate backport support for FILS SK offload in cfg80211 */ +#define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1 + /** * DOC: Introduction * @@ -2179,9 +2188,14 @@ struct cfg80211_connect_params { * have to be updated as part of update_connect_params() call. * * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated + * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm, + * username, erp sequence number and rrk) are updated + * @UPDATE_AUTH_TYPE: Indicates that Authentication type is updated */ enum cfg80211_connect_params_changed { UPDATE_ASSOC_IES = BIT(0), + UPDATE_FILS_ERP_INFO = BIT(1), + UPDATE_AUTH_TYPE = BIT(2), }; /** @@ -2403,12 +2417,14 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key (NL80211_KEK_LEN bytes) + * @kek: key encryption key * @kck: key confirmation key (NL80211_KCK_LEN bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) + * @kek_len: Length of @kek in octets */ struct cfg80211_gtk_rekey_data { const u8 *kek, *kck, *replay_ctr; + size_t kek_len; }; /** diff --git a/include/net/cnss2.h b/include/net/cnss2.h index e95ef8b54094d9eca2a342f9dc5cde1de186658e..a99f10ab5dbc038b811541d90cec8241be8bfcb9 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -174,6 +174,9 @@ extern void cnss_release_pm_sem(struct device *dev); extern int cnss_wlan_pm_control(struct device *dev, bool vote); extern int cnss_auto_suspend(struct device *dev); extern int cnss_auto_resume(struct 
device *dev); +extern int cnss_pci_force_wake_request(struct device *dev); +extern int cnss_pci_is_device_awake(struct device *dev); +extern int cnss_pci_force_wake_release(struct device *dev); extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name, int *num_vectors, uint32_t *user_base_data, diff --git a/include/net/llc.h b/include/net/llc.h index dc35f25eb679d4651b59a20c785005aa0cbb4d0f..890a87318014d528a78a3a9631ebdb75bf037a4b 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap) refcount_inc(&sap->refcnt); } +static inline bool llc_sap_hold_safe(struct llc_sap *sap) +{ + return refcount_inc_not_zero(&sap->refcnt); +} + void llc_sap_close(struct llc_sap *sap); static inline void llc_sap_put(struct llc_sap *sap) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index a964366a7ef52fa42f855ac65dc8c245d04d6a4d..6a80c37aa553760b7a54117c2f5efecd57cf99e4 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -64,6 +64,7 @@ enum { NEIGH_VAR_GC_THRESH1, NEIGH_VAR_GC_THRESH2, NEIGH_VAR_GC_THRESH3, + NEIGH_VAR_PROBE, NEIGH_VAR_MAX }; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4bb670b83378000f9a2dd622ec0a8ff1643423a1..e634bb0c0e65e48bcb3f00fff4227a6983bbd854 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -96,6 +96,8 @@ struct nf_conn { /* Extensions */ struct nf_ct_ext *ext; + void *sfe_entry; + /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; }; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 9b5e7634713e4125b983dd5964c6f196c17f5fdf..910ae08f95e294c03a40ab29f985d84d4a46f9f2 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -51,6 +51,7 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple 
*inverse, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto); +extern void (*delete_sfe_entry)(struct nf_conn *ct); /* Find a connection corresponding to a tuple. */ struct nf_conntrack_tuple_hash * diff --git a/include/net/tcp.h b/include/net/tcp.h index 12d992d978442eba3975ab838a61048f93e4ddb1..2b1c49d90974147d8876fbeb39668deb3de2d40e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -148,6 +148,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); * most likely due to retrans in 3WHS. */ +/* Number of full MSS to receive before Acking RFC2581 */ +#define TCP_DELACK_SEG 1 + #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. */ @@ -278,6 +281,11 @@ extern int sysctl_tcp_pacing_ca_ratio; extern int sysctl_tcp_default_init_rwnd; extern atomic_long_t tcp_memory_allocated; + +/* sysctl variables for controlling various tcp parameters */ +extern int sysctl_tcp_delack_seg; +extern int sysctl_tcp_use_userconfig; + extern struct percpu_counter tcp_sockets_allocated; extern unsigned long tcp_memory_pressure; @@ -373,6 +381,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); +/* sysctl master controller */ +extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, + int write, void __user *buffer, size_t *length, + loff_t *ppos); +extern int tcp_proc_delayed_ack_control(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); + void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 8fd4cda282c3b2ef7d945fe024e473a86d4b9b57..0472647a9cf79963a92301885e73143cefeacadd 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h 
@@ -452,6 +452,9 @@ struct scsi_host_template { /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; + /* True if the low-level driver supports blk-mq only */ + unsigned force_blk_mq:1; + /* * Countdown for host blocking with no commands outstanding. */ diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h index a0c84fedce2abe8c270f489a07ea30648e47ec64..0bb1b9a10aedbff7abeb3909de0bd48105a3692c 100644 --- a/include/soc/qcom/qmi_rmnet.h +++ b/include/soc/qcom/qmi_rmnet.h @@ -17,9 +17,17 @@ #include #include +struct qmi_rmnet_ps_ind { + void (*ps_on_handler)(void *); + void (*ps_off_handler)(void *); + struct list_head list; +}; + + #ifdef CONFIG_QCOM_QMI_RMNET void qmi_rmnet_qmi_exit(void *qmi_pt, void *port); void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt); +void qmi_rmnet_enable_all_flows(struct net_device *dev); #else static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) { @@ -29,6 +37,11 @@ static inline void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) { } + +static inline void +qmi_rmnet_enable_all_flows(struct net_device *dev) +{ +} #endif #ifdef CONFIG_QCOM_QMI_DFC @@ -36,6 +49,7 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id); void qmi_rmnet_qos_exit(struct net_device *dev, void *qos); void qmi_rmnet_burst_fc_check(struct net_device *dev, int ip_type, u32 mark, unsigned int len); +int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb); #else static inline void * qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) @@ -52,15 +66,28 @@ qmi_rmnet_burst_fc_check(struct net_device *dev, int ip_type, u32 mark, unsigned int len) { } + +static inline int qmi_rmnet_get_queue(struct net_device *dev, + struct sk_buff *skb) +{ + return 0; +} #endif #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable); void qmi_rmnet_work_init(void *port); void qmi_rmnet_work_exit(void 
*port); -int qmi_rmnet_work_get_active(void *port); -void qmi_rmnet_work_set_active(void *port, int status); +void qmi_rmnet_work_maybe_restart(void *port); void qmi_rmnet_work_restart(void *port); + +int qmi_rmnet_ps_ind_register(void *port, + struct qmi_rmnet_ps_ind *ps_ind); +int qmi_rmnet_ps_ind_deregister(void *port, + struct qmi_rmnet_ps_ind *ps_ind); +void qmi_rmnet_ps_off_notify(void *port); +void qmi_rmnet_ps_on_notify(void *port); + #else static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable) { @@ -77,12 +104,28 @@ static inline void qmi_rmnet_work_exit(void *port) { } -static inline int qmi_rmnet_work_get_active(void *port) +static inline void qmi_rmnet_work_maybe_restart(void *port) +{ + +} + +static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port, + struct qmi_rmnet_ps_ind *ps_ind) { return 0; +} +static inline int qmi_rmnet_ps_ind_deregister(struct rmnet_port *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + return 0; +} + +static inline void qmi_rmnet_ps_off_notify(struct rmnet_port *port) +{ } -static inline void qmi_rmnet_work_set_active(void *port, int status) + +static inline void qmi_rmnet_ps_on_notify(struct rmnet_port *port) { } diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h index 0b5debd3157c9d926a280b5bc470772cd093232d..241d95c97b25cea39b70ba3b540c4fe100add2d6 100644 --- a/include/soc/qcom/rmnet_qmi.h +++ b/include/soc/qcom/rmnet_qmi.h @@ -26,9 +26,11 @@ void *rmnet_get_rmnet_port(struct net_device *dev); struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id); void rmnet_reset_qmi_pt(void *port); void rmnet_init_qmi_pt(void *port, void *qmi); +void rmnet_enable_all_flows(void *port); void rmnet_set_powersave_format(void *port); void rmnet_clear_powersave_format(void *port); void rmnet_get_packets(void *port, u64 *rx, u64 *tx); +int rmnet_get_powersave_notif(void *port); #else static inline void *rmnet_get_qmi_pt(void *port) { @@ -59,6 +61,10 @@ static inline void 
rmnet_init_qmi_pt(void *port, void *qmi) { } +static inline void rmnet_enable_all_flows(void *port) +{ +} + static inline void rmnet_set_port_format(void *port) { } @@ -67,5 +73,10 @@ static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx) { } +static inline int rmnet_get_powersave_notif(void *port) +{ + return 0; +} + #endif /* CONFIG_QCOM_QMI_RMNET */ #endif /*_RMNET_QMI_H*/ diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h index 75f017ce0dffc14c5388390e1451903b88ce75a8..7b13c8edb100b471dd8b1e6eedd4a6da7ec374c9 100644 --- a/include/soc/qcom/secure_buffer.h +++ b/include/soc/qcom/secure_buffer.h @@ -57,6 +57,12 @@ int hyp_assign_table(struct sg_table *table, u32 *source_vm_list, int source_nelems, int *dest_vmids, int *dest_perms, int dest_nelems); + +int try_hyp_assign_table(struct sg_table *table, + u32 *source_vm_list, int source_nelems, + int *dest_vmids, int *dest_perms, + int dest_nelems); + extern int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vmlist, int source_nelems, int *dest_vmids, int *dest_perms, int dest_nelems); @@ -81,6 +87,14 @@ static inline int hyp_assign_table(struct sg_table *table, return -EINVAL; } +static inline int try_hyp_assign_table(struct sg_table *table, + u32 *source_vm_list, int source_nelems, + int *dest_vmids, int *dest_perms, + int dest_nelems) +{ + return -EINVAL; +} + static inline int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vmlist, int source_nelems, int *dest_vmids, int *dest_perms, int dest_nelems) diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index d20676facd84315b60c2b1133b8867dc7555ce9e..e821dc9fdf0b35423e149a22c16f378432241585 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -77,6 +77,8 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxprairie") #define early_machine_is_sdmmagpie() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmmagpie") +#define early_machine_is_trinket() \ + 
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,trinket") #else #define of_board_is_sim() 0 #define of_board_is_rumi() 0 @@ -105,6 +107,7 @@ #define early_machine_is_qcs401() 0 #define early_machine_is_sdxprairie() 0 #define early_machine_is_sdmmagpie() 0 +#define early_machine_is_trinket() 0 #endif #define PLATFORM_SUBTYPE_MDM 1 @@ -135,6 +138,7 @@ enum msm_cpu { MSM_CPU_QCS401, SDX_CPU_SDXPRAIRIE, MSM_CPU_SDMMAGPIE, + MSM_CPU_TRINKET, }; struct msm_soc_info { diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h index b2d82584a3afdd16922ceab4aefba71f2cf0ee3e..36cd472e3ae0046c0bf50be514bef1f473304667 100644 --- a/include/soc/qcom/sysmon.h +++ b/include/soc/qcom/sysmon.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,6 +23,7 @@ */ enum ssctl_ssr_event_enum_type { SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647, + SSCTL_SSR_EVENT_INVALID = -1, SSCTL_SSR_EVENT_BEFORE_POWERUP = 0, SSCTL_SSR_EVENT_AFTER_POWERUP = 1, SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2, diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h index cd9c6dbddffd9ad17f88380276766a9767545f51..af4a2edf6e6406fa4e18c539efc36164dae5b15a 100644 --- a/include/trace/events/clk.h +++ b/include/trace/events/clk.h @@ -228,6 +228,42 @@ DEFINE_EVENT(clk_state_dump, clk_state, TP_ARGS(name, prepare_count, enable_count, rate, vdd_level) ); +DECLARE_EVENT_CLASS(clk_duty_cycle, + + TP_PROTO(struct clk_core *core, struct clk_duty *duty), + + TP_ARGS(core, duty), + + TP_STRUCT__entry( + __string( name, core->name ) + __field( unsigned int, num ) + __field( unsigned int, den ) + ), + + TP_fast_assign( + __assign_str(name, core->name); + __entry->num = duty->num; + __entry->den = duty->den; + ), + + TP_printk("%s %u/%u", 
__get_str(name), (unsigned int)__entry->num, + (unsigned int)__entry->den) +); + +DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle, + + TP_PROTO(struct clk_core *core, struct clk_duty *duty), + + TP_ARGS(core, duty) +); + +DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete, + + TP_PROTO(struct clk_core *core, struct clk_duty *duty), + + TP_ARGS(core, duty) +); + #endif /* _TRACE_CLK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h index dc763220e656756308e0739f1af3e1dde7411717..10570cc8fa6e63091e13ed1f726898306cb05bd7 100644 --- a/include/trace/events/dfc.h +++ b/include/trace/events/dfc.h @@ -18,14 +18,15 @@ #include -DECLARE_EVENT_CLASS(dfc_tc, +TRACE_EVENT(dfc_qmi_tc, - TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen, - u32 tcm_handle, int enable), + TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant, + int qlen, u32 tcm_handle, int enable), - TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable), + TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable), TP_STRUCT__entry( + __string(dev_name, name) __field(u8, bid) __field(u32, fid) __field(u32, grant) @@ -35,6 +36,7 @@ DECLARE_EVENT_CLASS(dfc_tc, ), TP_fast_assign( + __assign_str(dev_name, name); __entry->bid = bearer_id; __entry->fid = flow_id; __entry->grant = grant; @@ -43,35 +45,20 @@ DECLARE_EVENT_CLASS(dfc_tc, __entry->enable = enable; ), - TP_printk("bearer_id=%u grant=%u qdisc_len=%d flow_id=%u " - "tcm_handle=0x%x %s", + TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s", + __get_str(dev_name), __entry->bid, __entry->grant, __entry->qlen, __entry->fid, __entry->tcm_handle, __entry->enable ? 
"enable" : "disable") ); -DEFINE_EVENT(dfc_tc, dfc_qmi_tc, - - TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen, - u32 tcm_handle, int enable), - - TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable) -); - -DEFINE_EVENT(dfc_tc, dfc_qmi_tc_limit, - - TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen, - u32 tcm_handle, int enable), - - TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable) -); - TRACE_EVENT(dfc_flow_ind, TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant, - u16 seq_num, u8 ack_req), + u16 seq_num, u8 ack_req, u32 ancillary), - TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req), + TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req, + ancillary), TP_STRUCT__entry( __field(int, src) @@ -81,6 +68,7 @@ TRACE_EVENT(dfc_flow_ind, __field(u32, grant) __field(u16, seq) __field(u8, ack_req) + __field(u32, ancillary) ), TP_fast_assign( @@ -91,43 +79,49 @@ TRACE_EVENT(dfc_flow_ind, __entry->grant = grant; __entry->seq = seq_num; __entry->ack_req = ack_req; + __entry->ancillary = ancillary; ), - TP_printk("src=%d idx[%d]: mux_id=%u bearer_id=%u grant=%u " - "seq_num=%u ack_req=%u", + TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u", __entry->src, __entry->idx, __entry->mid, __entry->bid, - __entry->grant, __entry->seq, __entry->ack_req) + __entry->grant, __entry->seq, __entry->ack_req, + __entry->ancillary) ); TRACE_EVENT(dfc_flow_check, - TP_PROTO(u8 bearer_id, unsigned int len, u32 grant), + TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant), - TP_ARGS(bearer_id, len, grant), + TP_ARGS(name, bearer_id, len, grant), TP_STRUCT__entry( + __string(dev_name, name) __field(u8, bearer_id) __field(unsigned int, len) __field(u32, grant) ), TP_fast_assign( + __assign_str(dev_name, name) __entry->bearer_id = bearer_id; __entry->len = len; __entry->grant = grant; ), - TP_printk("bearer_id=%u skb_len=%u current_grant=%u", + TP_printk("dev=%s bearer_id=%u skb_len=%u 
current_grant=%u", + __get_str(dev_name), __entry->bearer_id, __entry->len, __entry->grant) ); TRACE_EVENT(dfc_flow_info, - TP_PROTO(u8 bearer_id, u32 flow_id, int ip_type, u32 handle, int add), + TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type, + u32 handle, int add), - TP_ARGS(bearer_id, flow_id, ip_type, handle, add), + TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add), TP_STRUCT__entry( + __string(dev_name, name) __field(u8, bid) __field(u32, fid) __field(int, ip) @@ -136,6 +130,7 @@ TRACE_EVENT(dfc_flow_info, ), TP_fast_assign( + __assign_str(dev_name, name) __entry->bid = bearer_id; __entry->fid = flow_id; __entry->ip = ip_type; @@ -143,8 +138,9 @@ TRACE_EVENT(dfc_flow_info, __entry->action = add; ), - TP_printk("%s: bearer_id=%u flow_id=%u ip_type=%d tcm_handle=0x%x", + TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d", __entry->action ? "add flow" : "delete flow", + __get_str(dev_name), __entry->bid, __entry->fid, __entry->ip, __entry->handle) ); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9c4762a0d48bc845ae1c4b5fed1bbb5d429d996b..83bbd090ed6dcb6724725cc68614e07e5bd34ca3 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -164,9 +164,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * * RUNNING (we will not have dequeued if state != RUNNING). */ if (preempt) - return TASK_STATE_MAX; + return TASK_REPORT_MAX; - return __get_task_state(p); + return 1 << __get_task_state(p); } #endif /* CREATE_TRACE_POINTS */ @@ -212,7 +212,7 @@ TRACE_EVENT(sched_switch, { 0x40, "P" }, { 0x80, "I" }) : "R", - __entry->prev_state & TASK_STATE_MAX ? "+" : "", + __entry->prev_state & TASK_REPORT_MAX ? 
"+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h index 38f710ec607d5e333543477d46e11bf5fb24caa1..9689e715076fd3510c9b15d830ee618350dd1680 100644 --- a/include/uapi/drm/msm_drm_pp.h +++ b/include/uapi/drm/msm_drm_pp.h @@ -436,4 +436,23 @@ struct drm_msm_pa_dither { __u32 matrix[DITHER_MATRIX_SZ]; }; +/** + * struct drm_msm_ad4_roi_cfg - ad4 roi params config set + * by user-space client. + * @h_x - hotizontal direction start + * @h_y - hotizontal direction end + * @v_x - vertical direction start + * @v_y - vertical direction end + * @factor_in - the alpha value for inside roi region + * @factor_out - the alpha value for outside roi region + */ +#define DRM_MSM_AD4_ROI +struct drm_msm_ad4_roi_cfg { + __u32 h_x; + __u32 h_y; + __u32 v_x; + __u32 v_y; + __u32 factor_in; + __u32 factor_out; +}; #endif /* _MSM_DRM_PP_H_ */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 0c0b003244f39ec6fd10032da1de876e24f6d43a..96df0c614147fb121fd1793855ad65bfd6355988 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -15,6 +15,7 @@ ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) no-export-headers += kvm_para.h endif +header-y += mhi.h header-y += sockev.h header-y += nfc/ header-y += seemp_api.h diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 857bad91c454046200c81e260c9850933df0981b..27c62abb6c9ef0b69226401dac60229932e95cde 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt { #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) +#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) /* * Extension capability list. 
@@ -932,6 +933,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_SYNIC2 148 #define KVM_CAP_HYPERV_VP_INDEX 149 #define KVM_CAP_S390_BPB 152 +#define KVM_CAP_GET_MSR_FEATURES 153 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/linux/mhi.h b/include/uapi/linux/mhi.h new file mode 100644 index 0000000000000000000000000000000000000000..bfd03862624b88c2ef539ad3ac9abb8d2054e50f --- /dev/null +++ b/include/uapi/linux/mhi.h @@ -0,0 +1,36 @@ +#ifndef _UAPI_MHI_H +#define _UAPI_MHI_H + +#include +#include + +enum peripheral_ep_type { + DATA_EP_TYPE_RESERVED, + DATA_EP_TYPE_HSIC, + DATA_EP_TYPE_HSUSB, + DATA_EP_TYPE_PCIE, + DATA_EP_TYPE_EMBEDDED, + DATA_EP_TYPE_BAM_DMUX, +}; + +struct peripheral_ep_info { + enum peripheral_ep_type ep_type; + __u32 peripheral_iface_id; +}; + +struct ipa_ep_pair { + __u32 cons_pipe_num; + __u32 prod_pipe_num; +}; + +struct ep_info { + struct peripheral_ep_info ph_ep_info; + struct ipa_ep_pair ipa_ep_pair; + +}; + +#define MHI_UCI_IOCTL_MAGIC 'm' + +#define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info) + +#endif /* _UAPI_MHI_H */ diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 68e85c9ef72bc86b8e70aee7d4a3d256fbd2d9b8..5ef0962b9e34ec62605c5e17d6e2eaca8deeda1e 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -103,6 +103,8 @@ #define IPA_IOCTL_ODL_QUERY_ADAPL_EP_INFO 61 #define IPA_IOCTL_ODL_GET_AGG_BYTE_LIMIT 62 #define IPA_IOCTL_ODL_QUERY_MODEM_CONFIG 63 +#define IPA_IOCTL_GSB_CONNECT 64 +#define IPA_IOCTL_GSB_DISCONNECT 65 /** @@ -570,7 +572,14 @@ enum ipa_wlan_fw_ssr_event { #define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX }; -#define IPA_EVENT_MAX_NUM (IPA_WLAN_FW_SSR_EVENT_MAX) +enum ipa_gsb_event { + IPA_GSB_CONNECT = IPA_WLAN_FW_SSR_EVENT_MAX, + IPA_GSB_DISCONNECT, + IPA_GSB_EVENT_MAX, +#define IPA_GSB_EVENT_MAX IPA_GSB_EVENT_MAX +}; + +#define IPA_EVENT_MAX_NUM (IPA_GSB_EVENT_MAX) #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** @@ 
-1734,6 +1743,14 @@ struct ipa_ioc_l2tp_vlan_mapping_info { char vlan_iface_name[IPA_RESOURCE_NAME_MAX]; }; +/** + * struct ipa_ioc_gsb_info - connect/disconnect + * @name: interface name + */ +struct ipa_ioc_gsb_info { + char name[IPA_RESOURCE_NAME_MAX]; +}; + /** * struct ipa_msg_meta - Format of the message meta-data. * @msg_type: the type of the message @@ -1870,16 +1887,20 @@ enum ipacm_client_enum { IPACM_CLIENT_MAX }; +#define IPACM_SUPPORT_OF_LAN_STATS_FOR_ODU_CLIENTS + enum ipacm_per_client_device_type { IPACM_CLIENT_DEVICE_TYPE_USB = 0, IPACM_CLIENT_DEVICE_TYPE_WLAN = 1, - IPACM_CLIENT_DEVICE_TYPE_ETH = 2 + IPACM_CLIENT_DEVICE_TYPE_ETH = 2, + IPACM_CLIENT_DEVICE_TYPE_ODU = 3, + IPACM_CLIENT_DEVICE_MAX }; /** * max number of device types supported. */ -#define IPACM_MAX_CLIENT_DEVICE_TYPES 3 +#define IPACM_MAX_CLIENT_DEVICE_TYPES IPACM_CLIENT_DEVICE_MAX /** * @lanIface - Name of the lan interface @@ -2172,6 +2193,14 @@ struct ipa_odl_modem_config { IPA_IOCTL_ODL_QUERY_MODEM_CONFIG, \ struct ipa_odl_modem_config) +#define IPA_IOC_GSB_CONNECT _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GSB_CONNECT, \ + struct ipa_ioc_gsb_info) + +#define IPA_IOC_GSB_DISCONNECT _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GSB_DISCONNECT, \ + struct ipa_ioc_gsb_info) + /* * unique magic number of the Tethering bridge ioctls */ diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h index 03094d5161da8649bb37c64e4cf59f4f106a559a..d85adb6d02163dd1945ca6e43f716618878126b6 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -65,6 +65,7 @@ #define KGSL_CONTEXT_TYPE_CL 2 #define KGSL_CONTEXT_TYPE_C2D 3 #define KGSL_CONTEXT_TYPE_RS 4 +#define KGSL_CONTEXT_TYPE_VK 5 #define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E #define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000 diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h index d1c61b7839c952703896dbb3f448f2184cef3d34..771cea0e4bd1e5d65d55161430b85b2958b460c8 100644 --- a/include/uapi/linux/msm_npu.h 
+++ b/include/uapi/linux/msm_npu.h @@ -58,7 +58,6 @@ #define MSM_NPU_MAX_OUTPUT_LAYER_NUM 4 #define MSM_NPU_MAX_PATCH_LAYER_NUM (MSM_NPU_MAX_INPUT_LAYER_NUM +\ MSM_NPU_MAX_OUTPUT_LAYER_NUM) -#define MSM_NPU_MAX_STATS_BUF_SIZE 65536 /* ------------------------------------------------------------------------- * Data Structures diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 2f226b145f7d86e975c9c2ca0c7296793db0ab09..9a4fea7c4e4608ce705fd7653e1a4f63bdeed891 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -737,6 +737,9 @@ enum v4l2_mpeg_vidc_extradata { V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA = 17, V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP = 18, V4L2_MPEG_VIDC_EXTRADATA_LTR = 20, +#define V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA \ + V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA + V4L2_MPEG_VIDC_EXTRADATA_HDR10PLUS_METADATA = 23, V4L2_MPEG_VIDC_EXTRADATA_ROI_QP = 24, V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP = 25, V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI = 26, diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h index 34a65599b558ed178b27aacca8760d483000d55d..163264334beebfe1becaf5335851629d3f85f978 100644 --- a/include/uapi/media/cam_defs.h +++ b/include/uapi/media/cam_defs.h @@ -18,6 +18,10 @@ #define CAM_FLUSH_REQ (CAM_COMMON_OPCODE_BASE + 0x8) #define CAM_COMMON_OPCODE_MAX (CAM_COMMON_OPCODE_BASE + 0x9) +#define CAM_COMMON_OPCODE_BASE_v2 0x150 +#define CAM_ACQUIRE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x1) +#define CAM_RELEASE_HW (CAM_COMMON_OPCODE_BASE_v2 + 0x2) + #define CAM_EXT_OPCODE_BASE 0x200 #define CAM_CONFIG_DEV_EXTERNAL (CAM_EXT_OPCODE_BASE + 0x1) @@ -483,6 +487,73 @@ struct cam_acquire_dev_cmd { uint64_t resource_hdl; }; +/* + * In old version, while acquiring device the num_resources in + * struct cam_acquire_dev_cmd will be a valid value. During ACQUIRE_DEV + * KMD driver will return dev_handle as well as associate HW to handle. 
+ * If num_resources is set to the constant below, we are using + * the new version and we do not acquire HW in ACQUIRE_DEV IOCTL. + * ACQUIRE_DEV will only return handle and we should receive + * ACQUIRE_HW IOCTL after ACQUIRE_DEV and that is when the HW + * is associated with the dev_handle. + * + * (Data type): uint32_t + */ +#define CAM_API_COMPAT_CONSTANT 0xFEFEFEFE + +#define CAM_ACQUIRE_HW_STRUCT_VERSION_1 1 + +/** + * struct cam_acquire_hw_cmd_v1 - Control payload for acquire HW IOCTL (Ver 1) + * + * @struct_version: = CAM_ACQUIRE_HW_STRUCT_VERSION_1 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. + * @reserved: Reserved field for 64-bit alignment + * @session_handle: Session handle for the acquire command + * @dev_handle: Device handle to be returned + * @handle_type: Tells you how to interpret the variable resource_hdl- + * 1 = user pointer, 2 = mem handle + * @data_size: Total size of data contained in memory pointed + * to by resource_hdl + * @resource_hdl: Resource handle that refers to the actual + * resource data. + */ +struct cam_acquire_hw_cmd_v1 { + uint32_t struct_version; + uint32_t reserved; + int32_t session_handle; + int32_t dev_handle; + uint32_t handle_type; + uint32_t data_size; + uint64_t resource_hdl; +}; + +#define CAM_RELEASE_HW_STRUCT_VERSION_1 1 + +/** + * struct cam_release_hw_cmd_v1 - Control payload for release HW IOCTL (Ver 1) + * + * @struct_version: = CAM_RELEASE_HW_STRUCT_VERSION_1 for this struct + * This value should be the first 32-bits in any structure + * related to this IOCTL. So that if the struct needs to + * change, we can first read the starting 32-bits, get the + * version number and then typecast the data to struct + * accordingly. 
+ * @reserved: Reserved field for 64-bit alignment + * @session_handle: Session handle for the release + * @dev_handle: Device handle for the release + */ +struct cam_release_hw_cmd_v1 { + uint32_t struct_version; + uint32_t reserved; + int32_t session_handle; + int32_t dev_handle; +}; + /** * struct cam_flush_dev_cmd - Control payload for flush devices * diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h index 0f23e595fb1902b589125744bd0f8db39b346a56..07b91909332f76eda82997ef21241c2df01cd1da 100644 --- a/include/uapi/media/cam_isp.h +++ b/include/uapi/media/cam_isp.h @@ -376,4 +376,42 @@ struct cam_isp_bw_config { struct cam_isp_bw_vote rdi_vote[1]; } __attribute__((packed)); + +/* Acquire Device/HW v2 */ + +/** + * struct cam_isp_acquire_hw_info - ISP acquire HW params + * + * @common_info_version : Version of common info struct used + * @common_info_size : Size of common info struct used + * @common_info_offset : Offset of common info from start of data + * @num_inputs : Number of inputs + * @input_info_version : Version of input info struct used + * @input_info_size : Size of input info struct used + * @input_info_offset : Offset of input info from start of data + * @data : Start of data region + */ +struct cam_isp_acquire_hw_info { + uint16_t common_info_version; + uint16_t common_info_size; + uint32_t common_info_offset; + uint32_t num_inputs; + uint32_t input_info_version; + uint32_t input_info_size; + uint32_t input_info_offset; + uint64_t data; +}; + +#define CAM_ISP_ACQUIRE_COMMON_VER0 0x1000 + +#define CAM_ISP_ACQUIRE_COMMON_SIZE_VER0 0x0 + +#define CAM_ISP_ACQUIRE_INPUT_VER0 0x2000 + +#define CAM_ISP_ACQUIRE_INPUT_SIZE_VER0 sizeof(struct cam_isp_in_port_info) + +#define CAM_ISP_ACQUIRE_OUT_VER0 0x3000 + +#define CAM_ISP_ACQUIRE_OUT_SIZE_VER0 sizeof(struct cam_isp_out_port_info) + #endif /* __UAPI_CAM_ISP_H__ */ diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h index 
841c40af4d47f441417f7cbf27fd5cadc908c66d..ae65649964ff0ac9b61712d8a8fe1d4a7eb3d77e 100644 --- a/include/uapi/media/cam_req_mgr.h +++ b/include/uapi/media/cam_req_mgr.h @@ -262,6 +262,9 @@ struct cam_req_mgr_link_control { ((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \ (fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \ +#define GET_FD_FROM_HANDLE(hdl) \ + (hdl >> (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)) \ + #define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK) #define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \ diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h index 37947cf55a6a35abbe8b2ae6c204fdf49ce3d10a..a71c490edb2d58deb49acf32e5ab635fdd6b3969 100644 --- a/include/uapi/media/msm_vidc_utils.h +++ b/include/uapi/media/msm_vidc_utils.h @@ -187,6 +187,11 @@ struct msm_vidc_roi_deltaqp_payload { unsigned int data[1]; }; +struct msm_vidc_hdr10plus_metadata_payload { + unsigned int size; + unsigned int data[1]; +}; + struct msm_vidc_mastering_display_colour_sei_payload { unsigned int nDisplayPrimariesX[3]; unsigned int nDisplayPrimariesY[3]; @@ -238,6 +243,7 @@ struct msm_vidc_vui_display_info_payload { #define MSM_VIDC_EXTRADATA_PQ_INFO 0x00000017 #define MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI 0x00000018 #define MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO 0x00000019 +#define MSM_VIDC_EXTRADATA_HDR10PLUS_METADATA 0x0000001A #define MSM_VIDC_EXTRADATA_INPUT_CROP 0x0700000E #define MSM_VIDC_EXTRADATA_OUTPUT_CROP 0x0700000F #define MSM_VIDC_EXTRADATA_MULTISLICE_INFO 0x7F100000 diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index d6c5f7615281891ff74eb4dd1c99414a503cb234..1674418197c96129431f2bb3f910e07d9b9037b1 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -140,6 +140,11 @@ struct snd_compr_audio_info { #define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0 #define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1 +enum 
sndrv_compress_latency_mode { + SNDRV_COMPRESS_LEGACY_LATENCY_MODE = 0, + SNDRV_COMPRESS_LOW_LATENCY_MODE = 1, +}; + /** * enum sndrv_compress_encoder * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the @@ -166,6 +171,7 @@ enum sndrv_compress_encoder { SNDRV_COMPRESS_START_DELAY = 9, SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10, SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11, + SNDRV_COMPRESS_LATENCY_MODE = 12, }; #define SNDRV_COMPRESS_MIN_BLK_SIZE SNDRV_COMPRESS_MIN_BLK_SIZE @@ -178,6 +184,7 @@ enum sndrv_compress_encoder { #define SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK \ SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK #define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK +#define SNDRV_COMPRESS_LATENCY_MODE SNDRV_COMPRESS_LATENCY_MODE /** * struct snd_compr_metadata - compressed stream metadata diff --git a/init/main.c b/init/main.c index 0d88f37febcb29d56a1f18297a9b9a63dddb5243..c4a45145e102db935d2b7c6032b24e2e797189a1 100644 --- a/init/main.c +++ b/init/main.c @@ -543,8 +543,8 @@ asmlinkage __visible void __init start_kernel(void) setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); - boot_cpu_state_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + boot_cpu_hotplug_init(); build_all_zonelists(NULL); page_alloc_init(); diff --git a/kernel/cpu.c b/kernel/cpu.c index 25c003409b074c6f6a38619120284035937106e0..ce91a253b778a4fbb7a1860d432eec6e4fd05274 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -63,6 +63,7 @@ struct cpuhp_cpu_state { bool rollback; bool single; bool bringup; + bool booted_once; struct hlist_node *node; struct hlist_node *last; enum cpuhp_state cb_state; @@ -349,6 +350,85 @@ void cpu_hotplug_enable(void) EXPORT_SYMBOL_GPL(cpu_hotplug_enable); #endif /* CONFIG_HOTPLUG_CPU */ +#ifdef CONFIG_HOTPLUG_SMT +enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; +EXPORT_SYMBOL_GPL(cpu_smt_control); + +static bool cpu_smt_available __read_mostly; 
+ +void __init cpu_smt_disable(bool force) +{ + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || + cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + return; + + if (force) { + pr_info("SMT: Force disabled\n"); + cpu_smt_control = CPU_SMT_FORCE_DISABLED; + } else { + cpu_smt_control = CPU_SMT_DISABLED; + } +} + +/* + * The decision whether SMT is supported can only be done after the full + * CPU identification. Called from architecture code before non boot CPUs + * are brought up. + */ +void __init cpu_smt_check_topology_early(void) +{ + if (!topology_smt_supported()) + cpu_smt_control = CPU_SMT_NOT_SUPPORTED; +} + +/* + * If SMT was disabled by BIOS, detect it here, after the CPUs have been + * brought online. This ensures the smt/l1tf sysfs entries are consistent + * with reality. cpu_smt_available is set to true during the bringup of non + * boot CPUs when a SMT sibling is detected. Note, this may overwrite + * cpu_smt_control's previous setting. + */ +void __init cpu_smt_check_topology(void) +{ + if (!cpu_smt_available) + cpu_smt_control = CPU_SMT_NOT_SUPPORTED; +} + +static int __init smt_cmdline_disable(char *str) +{ + cpu_smt_disable(str && !strcmp(str, "force")); + return 0; +} +early_param("nosmt", smt_cmdline_disable); + +static inline bool cpu_smt_allowed(unsigned int cpu) +{ + if (topology_is_primary_thread(cpu)) + return true; + + /* + * If the CPU is not a 'primary' thread and the booted_once bit is + * set then the processor has SMT support. Store this information + * for the late check of SMT support in cpu_smt_check_topology(). + */ + if (per_cpu(cpuhp_state, cpu).booted_once) + cpu_smt_available = true; + + if (cpu_smt_control == CPU_SMT_ENABLED) + return true; + + /* + * On x86 it's required to boot all logical CPUs at least once so + * that the init code can get a chance to set CR4.MCE on each + * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any + * core will shutdown the machine. 
+ */ + return !per_cpu(cpuhp_state, cpu).booted_once; +} +#else +static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } +#endif + static inline enum cpuhp_state cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target) { @@ -429,6 +509,16 @@ static int bringup_wait_for_ap(unsigned int cpu) stop_machine_unpark(cpu); kthread_unpark(st->thread); + /* + * SMT soft disabling on X86 requires to bring the CPU out of the + * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The + * CPU marked itself as booted_once in cpu_notify_starting() so the + * cpu_smt_allowed() check will now return false if this is not the + * primary sibling. + */ + if (!cpu_smt_allowed(cpu)) + return -ECANCELED; + if (st->target <= CPUHP_AP_ONLINE_IDLE) return 0; @@ -761,7 +851,6 @@ static int takedown_cpu(unsigned int cpu) /* Park the smpboot threads */ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); - smpboot_park_threads(cpu); /* * Prevent irq alloc/free while the dying cpu reorganizes the @@ -923,20 +1012,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, return ret; } +static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) +{ + if (cpu_hotplug_disabled) + return -EBUSY; + return _cpu_down(cpu, 0, target); +} + static int do_cpu_down(unsigned int cpu, enum cpuhp_state target) { int err; cpu_maps_update_begin(); - - if (cpu_hotplug_disabled) { - err = -EBUSY; - goto out; - } - - err = _cpu_down(cpu, 0, target); - -out: + err = cpu_down_maps_locked(cpu, target); cpu_maps_update_done(); return err; } @@ -965,6 +1053,7 @@ void notify_cpu_starting(unsigned int cpu) int ret; rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. 
*/ + st->booted_once = true; while (st->state < target) { st->state++; ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); @@ -1110,6 +1199,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target) err = -EBUSY; goto out; } + if (!cpu_smt_allowed(cpu)) { + err = -EPERM; + goto out; + } err = _cpu_up(cpu, 0, target); out: @@ -1412,7 +1505,7 @@ static struct cpuhp_step cpuhp_ap_states[] = { [CPUHP_AP_SMPBOOT_THREADS] = { .name = "smpboot/threads:online", .startup.single = smpboot_unpark_threads, - .teardown.single = NULL, + .teardown.single = smpboot_park_threads, }, [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { .name = "irq/affinity:online", @@ -1986,10 +2079,172 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = { NULL }; +#ifdef CONFIG_HOTPLUG_SMT + +static const char *smt_states[] = { + [CPU_SMT_ENABLED] = "on", + [CPU_SMT_DISABLED] = "off", + [CPU_SMT_FORCE_DISABLED] = "forceoff", + [CPU_SMT_NOT_SUPPORTED] = "notsupported", +}; + +static ssize_t +show_smt_control(struct device *dev, struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]); +} + +static void cpuhp_offline_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = true; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); +} + +static void cpuhp_online_cpu_device(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + dev->offline = false; + /* Tell user space about the state change */ + kobject_uevent(&dev->kobj, KOBJ_ONLINE); +} + +static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + for_each_online_cpu(cpu) { + if (topology_is_primary_thread(cpu)) + continue; + ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); + if (ret) + break; + /* + * As this needs to hold the cpu maps lock it's impossible + * to call device_offline() because that ends up calling + * 
cpu_down() which takes cpu maps lock. cpu maps lock + * needs to be held as this might race against in kernel + * abusers of the hotplug machinery (thermal management). + * + * So nothing would update device:offline state. That would + * leave the sysfs entry stale and prevent onlining after + * smt control has been changed to 'off' again. This is + * called under the sysfs hotplug lock, so it is properly + * serialized against the regular offline usage. + */ + cpuhp_offline_cpu_device(cpu); + } + if (!ret) + cpu_smt_control = ctrlval; + cpu_maps_update_done(); + return ret; +} + +static int cpuhp_smt_enable(void) +{ + int cpu, ret = 0; + + cpu_maps_update_begin(); + cpu_smt_control = CPU_SMT_ENABLED; + for_each_present_cpu(cpu) { + /* Skip online CPUs and CPUs on offline nodes */ + if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) + continue; + ret = _cpu_up(cpu, 0, CPUHP_ONLINE); + if (ret) + break; + /* See comment in cpuhp_smt_disable() */ + cpuhp_online_cpu_device(cpu); + } + cpu_maps_update_done(); + return ret; +} + +static ssize_t +store_smt_control(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ctrlval, ret; + + if (sysfs_streq(buf, "on")) + ctrlval = CPU_SMT_ENABLED; + else if (sysfs_streq(buf, "off")) + ctrlval = CPU_SMT_DISABLED; + else if (sysfs_streq(buf, "forceoff")) + ctrlval = CPU_SMT_FORCE_DISABLED; + else + return -EINVAL; + + if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) + return -EPERM; + + if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + return -ENODEV; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + if (ctrlval != cpu_smt_control) { + switch (ctrlval) { + case CPU_SMT_ENABLED: + ret = cpuhp_smt_enable(); + break; + case CPU_SMT_DISABLED: + case CPU_SMT_FORCE_DISABLED: + ret = cpuhp_smt_disable(ctrlval); + break; + } + } + + unlock_device_hotplug(); + return ret ? 
ret : count; +} +static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control); + +static ssize_t +show_smt_active(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool active = topology_max_smt_threads() > 1; + + return snprintf(buf, PAGE_SIZE - 2, "%d\n", active); +} +static DEVICE_ATTR(active, 0444, show_smt_active, NULL); + +static struct attribute *cpuhp_smt_attrs[] = { + &dev_attr_control.attr, + &dev_attr_active.attr, + NULL +}; + +static const struct attribute_group cpuhp_smt_attr_group = { + .attrs = cpuhp_smt_attrs, + .name = "smt", + NULL +}; + +static int __init cpu_smt_state_init(void) +{ + return sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpuhp_smt_attr_group); +} + +#else +static inline int cpu_smt_state_init(void) { return 0; } +#endif + static int __init cpuhp_sysfs_init(void) { int cpu, ret; + ret = cpu_smt_state_init(); + if (ret) + return ret; + ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpuhp_cpu_root_attr_group); if (ret) @@ -2098,9 +2353,12 @@ void __init boot_cpu_init(void) /* * Must be called _AFTER_ setting up the per_cpu areas */ -void __init boot_cpu_state_init(void) +void __init boot_cpu_hotplug_init(void) { - per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE; +#ifdef CONFIG_SMP + this_cpu_write(cpuhp_state.booted_once, true); +#endif + this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); } static ATOMIC_NOTIFIER_HEAD(idle_notifier); diff --git a/kernel/fork.c b/kernel/fork.c index 88b065b2c2e97e1adaa3a865235d762994a8f329..35a0c095dab2baf8885644cffe26e46108d0a9f0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2017,7 +2017,7 @@ struct task_struct *fork_idle(int cpu) cpu_to_node(cpu)); if (!IS_ERR(task)) { init_idle_pids(task->pids); - init_idle(task, cpu, false); + init_idle(task, cpu); } return task; diff --git a/kernel/module.c b/kernel/module.c index d89d348f185ce8452760b2b105940a885ca73ffc..52d106d463e973703902ae8b7cb3f8c348205b6c 100644 --- a/kernel/module.c +++ 
b/kernel/module.c @@ -2178,6 +2178,11 @@ static void free_module(struct module *mod) /* Finally, free the core (containing the module structure) */ disable_ro_nx(&mod->core_layout); +#ifdef CONFIG_DEBUG_MODULE_LOAD_INFO + pr_info("Unloaded %s: module core layout address range: 0x%lx-0x%lx\n", + mod->name, (long)mod->core_layout.base, + (long)(mod->core_layout.base + mod->core_layout.size - 1)); +#endif module_memfree(mod->core_layout.base); #ifdef CONFIG_MPU @@ -3507,6 +3512,14 @@ static noinline int do_init_module(struct module *mod) mod_tree_remove_init(mod); disable_ro_nx(&mod->init_layout); module_arch_freeing_init(mod); +#ifdef CONFIG_DEBUG_MODULE_LOAD_INFO + pr_info("Loaded %s: module init layout addresses range: 0x%lx-0x%lx\n", + mod->name, (long)mod->init_layout.base, + (long)(mod->init_layout.base + mod->init_layout.size - 1)); + pr_info("%s: core layout addresses range: 0x%lx-0x%lx\n", mod->name, + (long)mod->core_layout.base, + (long)(mod->core_layout.base + mod->core_layout.size - 1)); +#endif mod->init_layout.base = NULL; mod->init_layout.size = 0; mod->init_layout.ro_size = 0; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1e4b7d0d53108a01c36897e75c40544d036ebff7..7370608cbcab746a58a81c920a496471631821b5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1448,7 +1448,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * yield - it could be a while. 
*/ if (unlikely(queued)) { - ktime_t to = NSEC_PER_MSEC / HZ; + ktime_t to = NSEC_PER_MSEC; set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&to, HRTIMER_MODE_REL); @@ -2466,7 +2466,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) unsigned long flags; int cpu; - init_new_task_load(p, false); + init_new_task_load(p); cpu = get_cpu(); __sched_fork(clone_flags, p); /* @@ -4917,6 +4917,7 @@ static inline bool is_sched_lib_based_app(pid_t pid) char path_buf[LIB_PATH_LENGTH]; bool found = false; struct task_struct *p; + struct mm_struct *mm; if (strnlen(sched_lib_name, LIB_PATH_LENGTH) == 0) return false; @@ -4933,11 +4934,12 @@ static inline bool is_sched_lib_based_app(pid_t pid) get_task_struct(p); rcu_read_unlock(); - if (!p->mm) + mm = get_task_mm(p); + if (!mm) goto put_task_struct; - down_read(&p->mm->mmap_sem); - for (vma = p->mm->mmap; vma ; vma = vma->vm_next) { + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma ; vma = vma->vm_next) { if (vma->vm_file && vma->vm_flags & VM_EXEC) { name = d_path(&vma->vm_file->f_path, path_buf, LIB_PATH_LENGTH); @@ -4953,7 +4955,8 @@ static inline bool is_sched_lib_based_app(pid_t pid) } release_sem: - up_read(&p->mm->mmap_sem); + up_read(&mm->mmap_sem); + mmput(mm); put_task_struct: put_task_struct(p); return found; @@ -5511,14 +5514,12 @@ void show_state_filter(unsigned long state_filter) * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. 
*/ -void init_idle(struct task_struct *idle, int cpu, bool cpu_up) +void init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; __sched_fork(0, idle); - if (!cpu_up) - init_new_task_load(idle, true); raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_lock(&rq->lock); @@ -5820,6 +5821,8 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, rq_unlock(rq, rf); raw_spin_lock(&next->pi_lock); rq_relock(rq, rf); + if (!(rq->clock_update_flags & RQCF_UPDATED)) + update_rq_clock(rq); /* * Since we're inside stop-machine, _nothing_ should have @@ -5843,6 +5846,8 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, rq = dead_rq; *rf = orf; rq_relock(rq, rf); + if (!(rq->clock_update_flags & RQCF_UPDATED)) + update_rq_clock(rq); } raw_spin_unlock(&next->pi_lock); } @@ -5871,7 +5876,7 @@ int do_isolation_work_cpu_stop(void *data) sched_ttwu_pending(); /* Update our root-domain */ - raw_spin_lock(&rq->lock); + rq_lock(rq, &rf); /* * Temporarily mark the rq as offline. This will allow us to @@ -5886,7 +5891,7 @@ int do_isolation_work_cpu_stop(void *data) if (rq->rd) set_rq_online(rq); - raw_spin_unlock(&rq->lock); + rq_unlock(rq, &rf); clear_walt_request(cpu); local_irq_enable(); @@ -6173,6 +6178,18 @@ int sched_cpu_activate(unsigned int cpu) struct rq *rq = cpu_rq(cpu); struct rq_flags rf; +#ifdef CONFIG_SCHED_SMT + /* + * The sched_smt_present static key needs to be evaluated on every + * hotplug event because at boot time SMT might be disabled when + * the number of booted CPUs is limited. + * + * If then later a sibling gets hotplugged, then the key would stay + * off and SMT scheduling would never be functional. 
+ */ + if (cpumask_weight(cpu_smt_mask(cpu)) > 1) + static_branch_enable_cpuslocked(&sched_smt_present); +#endif set_cpu_active(cpu, true); if (sched_smp_initialized) { @@ -6276,22 +6293,6 @@ int sched_cpu_dying(unsigned int cpu) } #endif -#ifdef CONFIG_SCHED_SMT -DEFINE_STATIC_KEY_FALSE(sched_smt_present); - -static void sched_init_smt(void) -{ - /* - * We've enumerated all CPUs and will assume that if any CPU - * has SMT siblings, CPU0 will too. - */ - if (cpumask_weight(cpu_smt_mask(0)) > 1) - static_branch_enable(&sched_smt_present); -} -#else -static inline void sched_init_smt(void) { } -#endif - void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; @@ -6323,8 +6324,6 @@ void __init sched_init_smp(void) init_sched_rt_class(); init_sched_dl_class(); - sched_init_smt(); - sched_smp_initialized = true; } @@ -6524,7 +6523,8 @@ void __init sched_init(void) * but because we are the idle thread, we just pick up running again * when this runqueue becomes "idle". */ - init_idle(current, smp_processor_id(), false); + init_idle(current, smp_processor_id()); + init_new_task_load(current); calc_load_update = jiffies + LOAD_FREQ; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1ff95e922ac281493da7c8af47f0eb925ba6e7d8..7ae854f952676e111300aa37c1f1512698abce2d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -199,7 +199,9 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = { #ifdef CONFIG_SCHED_WALT /* 1ms default for 20ms window size scaled to 1024 */ -unsigned int sysctl_sched_min_task_util_for_boost_colocation = 51; +unsigned int sysctl_sched_min_task_util_for_boost = 51; +/* 0.68ms default for 20ms window size scaled to 1024 */ +unsigned int sysctl_sched_min_task_util_for_colocation = 35; #endif static inline void update_load_add(struct load_weight *lw, unsigned long inc) @@ -6938,6 +6940,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p } #ifdef CONFIG_SCHED_SMT 
+DEFINE_STATIC_KEY_FALSE(sched_smt_present); static inline void set_idle_cores(int cpu, int val) { @@ -7359,6 +7362,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, unsigned int active_cpus_count = 0; int prev_cpu = task_cpu(p); bool next_group_higher_cap = false; + int isolated_candidate = -1; *backup_cpu = -1; @@ -7419,6 +7423,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (!cpu_online(i) || cpu_isolated(i)) continue; + if (isolated_candidate == -1) + isolated_candidate = i; /* * This CPU is the target of an active migration that's * yet to complete. Avoid placing another task on it. @@ -7580,12 +7586,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (capacity_orig < capacity_orig_of(cpu)) continue; - /* - * Favor CPUs with smaller capacity for non latency - * sensitive tasks. - */ - if (capacity_orig > target_capacity) - continue; + + /* * Case B) Non latency sensitive tasks on IDLE CPUs. @@ -7687,6 +7689,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (!prefer_idle && (target_cpu != -1 || best_idle_cpu != -1) && (fbt_env->placement_boost == SCHED_BOOST_NONE || + sched_boost() != FULL_THROTTLE_BOOST || (fbt_env->placement_boost == SCHED_BOOST_ON_BIG && !next_group_higher_cap))) break; @@ -7781,6 +7784,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, target_cpu = *backup_cpu; *backup_cpu = -1; } + + if (target_cpu == -1 && isolated_candidate != -1 && + cpu_isolated(prev_cpu)) + target_cpu = isolated_candidate; + out: return target_cpu; } @@ -7953,6 +7961,15 @@ static inline int wake_to_idle(struct task_struct *p) } #ifdef CONFIG_SCHED_WALT +static inline bool is_task_util_above_min_thresh(struct task_struct *p) +{ + unsigned int threshold = (sysctl_sched_boost == CONSERVATIVE_BOOST) ? 
+ sysctl_sched_min_task_util_for_boost : + sysctl_sched_min_task_util_for_colocation; + + return task_util(p) > threshold; +} + static inline struct cpumask *find_rtg_target(struct task_struct *p) { struct related_thread_group *grp; @@ -7961,9 +7978,7 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p) rcu_read_lock(); grp = task_related_thread_group(p); - if (grp && grp->preferred_cluster && - (task_util(p) > - sysctl_sched_min_task_util_for_boost_colocation)) { + if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) { rtg_target = &grp->preferred_cluster->cpus; if (!task_fits_max(p, cpumask_first(rtg_target))) rtg_target = NULL; @@ -9770,6 +9785,11 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) if (sgs->sum_nr_running <= sgs->group_weight) return false; +#ifdef CONFIG_SCHED_WALT + if (env->idle != CPU_NOT_IDLE && walt_rotation_enabled) + return true; +#endif + if ((sgs->group_capacity * 100) < (sgs->group_util * env->sd->imbalance_pct)) return true; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index c1dbb2cc1f29c8a936f5a0fe4b1a467c10e2b9b5..2e973e667b0d1857d7e0cd27860f0dec4b48e439 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -147,13 +147,15 @@ static void cpuidle_idle_call(void) } /* - * Tell the RCU framework we are entering an idle section, - * so no more rcu read side critical sections and one more + * The RCU framework needs to be told that we are entering an idle + * section, so no more rcu read side critical sections and one more * step to the grace period */ - rcu_idle_enter(); if (cpuidle_not_available(drv, dev)) { + tick_nohz_idle_stop_tick(); + rcu_idle_enter(); + default_idle_call(); goto exit_idle; } @@ -170,20 +172,37 @@ static void cpuidle_idle_call(void) if (idle_should_enter_s2idle() || dev->use_deepest_state) { if (idle_should_enter_s2idle()) { + rcu_idle_enter(); + entered_state = cpuidle_enter_s2idle(drv, dev); if (entered_state > 0) { local_irq_enable(); goto 
exit_idle; } + + rcu_idle_exit(); } + tick_nohz_idle_stop_tick(); + rcu_idle_enter(); + next_state = cpuidle_find_deepest_state(drv, dev); call_cpuidle(drv, dev, next_state); } else { + bool stop_tick = true; + /* * Ask the cpuidle framework to choose a convenient idle state. */ - next_state = cpuidle_select(drv, dev); + next_state = cpuidle_select(drv, dev, &stop_tick); + + if (stop_tick) + tick_nohz_idle_stop_tick(); + else + tick_nohz_idle_retain_tick(); + + rcu_idle_enter(); + entered_state = call_cpuidle(drv, dev, next_state); /* * Give the governor an opportunity to reflect on the outcome @@ -228,6 +247,7 @@ static void do_idle(void) rmb(); if (cpu_is_offline(smp_processor_id())) { + tick_nohz_idle_stop_tick_protected(); cpuhp_report_idle_dead(); arch_cpu_idle_dead(); } @@ -241,10 +261,12 @@ static void do_idle(void) * broadcast device expired for us, we don't want to go deep * idle as we know that the IPI is going to arrive right away. */ - if (cpu_idle_force_poll || tick_check_broadcast_expired()) + if (cpu_idle_force_poll || tick_check_broadcast_expired()) { + tick_nohz_idle_restart_tick(); cpu_idle_poll(); - else + } else { cpuidle_idle_call(); + } arch_cpu_idle_exit(); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 21e7d2c54e6bdef6cc858ab8e895c1078ac06d58..82ad44dc240ff8b56924a782e9951be1bc5fe04e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2940,7 +2940,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p) */ if (sysctl_sched_boost == CONSERVATIVE_BOOST && task_util(p) <= - sysctl_sched_min_task_util_for_boost_colocation) + sysctl_sched_min_task_util_for_boost) policy = SCHED_BOOST_NONE; } diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 082dcf58cc5849031cf608bc07c816608651f053..ad929ceca4062ff08942caa4526e0c2938245389 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -349,6 +349,12 @@ static void update_task_cpu_cycles(struct task_struct *p, int cpu, 
p->cpu_cycles = read_cycle_counter(cpu, wallclock); } +static inline bool is_ed_enabled(void) +{ + return (walt_rotation_enabled || (sched_boost_policy() != + SCHED_BOOST_NONE)); +} + void clear_ed_task(struct task_struct *p, struct rq *rq) { if (p == rq->ed_task) @@ -367,8 +373,7 @@ bool early_detection_notify(struct rq *rq, u64 wallclock) rq->ed_task = NULL; - if ((!walt_rotation_enabled && sched_boost_policy() == - SCHED_BOOST_NONE) || !rq->cfs.h_nr_running) + if (!is_ed_enabled() || !rq->cfs.h_nr_running) return 0; list_for_each_entry(p, &rq->cfs_tasks, se.group_node) { @@ -874,11 +879,13 @@ void fixup_busy_time(struct task_struct *p, int new_cpu) irq_work_queue(&walt_migration_irq_work); } - if (p == src_rq->ed_task) { - src_rq->ed_task = NULL; - dest_rq->ed_task = p; - } else if (is_ed_task(p, wallclock)) { - dest_rq->ed_task = p; + if (is_ed_enabled()) { + if (p == src_rq->ed_task) { + src_rq->ed_task = NULL; + dest_rq->ed_task = p; + } else if (is_ed_task(p, wallclock)) { + dest_rq->ed_task = p; + } } done: @@ -1974,12 +1981,16 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event, update_task_demand(p, rq, event, wallclock); update_cpu_busy_time(p, rq, event, wallclock, irqtime); update_task_pred_demand(rq, p, event); -done: + + if (exiting_task(p)) + goto done; + trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime, rq->cc.cycles, rq->cc.time, &rq->grp_time); trace_sched_update_task_ravg_mini(p, rq, event, wallclock, irqtime, rq->cc.cycles, rq->cc.time, &rq->grp_time); +done: p->ravg.mark_start = wallclock; run_walt_irq_work(old_window_start, rq); @@ -2000,7 +2011,7 @@ int sched_set_init_task_load(struct task_struct *p, int init_load_pct) return 0; } -void init_new_task_load(struct task_struct *p, bool idle_task) +void init_new_task_load(struct task_struct *p) { int i; u32 init_load_windows = sched_init_task_load_windows; @@ -2019,9 +2030,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task) /* Don't have much 
choice. CPU frequency would be bogus */ BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu); - if (idle_task) - return; - if (init_load_pct) { init_load_windows = div64_u64((u64)init_load_pct * (u64)sched_ravg_window, 100); diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h index 995efe7cce6d57acde66de31244054d486ea4d40..663334d61c48626f13ff2bce219deb13a79074b9 100644 --- a/kernel/sched/walt.h +++ b/kernel/sched/walt.h @@ -153,7 +153,7 @@ extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p, extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p); extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p); extern void fixup_busy_time(struct task_struct *p, int new_cpu); -extern void init_new_task_load(struct task_struct *p, bool idle_task); +extern void init_new_task_load(struct task_struct *p); extern void mark_task_starting(struct task_struct *p); extern void set_window_start(struct rq *rq); void account_irqtime(int cpu, struct task_struct *curr, u64 delta, @@ -350,7 +350,7 @@ static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, } static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } -static inline void init_new_task_load(struct task_struct *p, bool idle_task) +static inline void init_new_task_load(struct task_struct *p) { } diff --git a/kernel/smp.c b/kernel/smp.c index 7672a688a9b6ea2126ef424c06b512d68d48727c..d710fd3eb242574a3cffbb32daabb3adc010b96f 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -616,6 +616,8 @@ void __init smp_init(void) num_nodes, (num_nodes > 1 ? "s" : ""), num_cpus, (num_cpus > 1 ? 
"s" : "")); + /* Final decision about SMT support */ + cpu_smt_check_topology(); /* Any cleanup work */ smp_cpus_done(setup_max_cpus); } diff --git a/kernel/smpboot.c b/kernel/smpboot.c index eaf52c589e8e33b0b21accf01826df144670efbe..5043e7433f4b15879a6498ed3d1ca6cfa2876f83 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu) if (!tsk) return ERR_PTR(-ENOMEM); - init_idle(tsk, cpu, true); + init_idle(tsk, cpu); return tsk; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f5297a0ee323c72aa6bd146c5257fcc0dcfcbbb7..9a2146de5e5a88bc593d53a31740f5f984c6c6d7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -378,8 +378,17 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, { - .procname = "sched_min_task_util_for_boost_colocation", - .data = &sysctl_sched_min_task_util_for_boost_colocation, + .procname = "sched_min_task_util_for_boost", + .data = &sysctl_sched_min_task_util_for_boost, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one_thousand, + }, + { + .procname = "sched_min_task_util_for_colocation", + .data = &sysctl_sched_min_task_util_for_colocation, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 98d08d32f2aeff41e68b6525137cbd3cd8be06e8..1e7bbbbfc551dee15b567191f18dde51f93b9e1a 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -463,13 +463,19 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base, #endif } -static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) +static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, + const struct hrtimer *exclude) { struct hrtimer_clock_base *base = cpu_base->clock_base; unsigned int active = cpu_base->active_bases; ktime_t expires, expires_next = KTIME_MAX; - 
hrtimer_update_next_timer(cpu_base, NULL);
+	/*
+	 * Skip initializing cpu_base->next_timer to NULL as we skip updating
+	 * next_timer in below loop if the timer is being excluded.
+	 */
+	if (!exclude)
+		hrtimer_update_next_timer(cpu_base, NULL);
 	for (; active; base++, active >>= 1) {
 		struct timerqueue_node *next;
 		struct hrtimer *timer;
@@ -479,9 +485,24 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
+		if (timer == exclude) {
+			/* Get to the next timer in the queue. */
+			struct rb_node *rbn = rb_next(&next->node);
+
+			next = rb_entry_safe(rbn, struct timerqueue_node, node);
+			if (!next)
+				continue;
+
+			timer = container_of(next, struct hrtimer, node);
+		}
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires < expires_next) {
 			expires_next = expires;
+
+			/* Skip cpu_base update if a timer is being excluded. */
+			if (exclude)
+				continue;
+
 			hrtimer_update_next_timer(cpu_base, timer);
 		}
 	}
@@ -560,7 +581,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 	if (!cpu_base->hres_active)
 		return;
 
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, NULL);
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -1086,7 +1107,30 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base);
+		expires = __hrtimer_get_next_event(cpu_base, NULL);
+
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+	return expires;
+}
+
+/**
+ * hrtimer_next_event_without - time until next expiry event w/o one timer
+ * @exclude: timer to exclude
+ *
+ * Returns the next expiry time over all timers except for the @exclude one or
+ * KTIME_MAX if none of them is pending.
+ */ +u64 hrtimer_next_event_without(const struct hrtimer *exclude) +{ + struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); + u64 expires = KTIME_MAX; + unsigned long flags; + + raw_spin_lock_irqsave(&cpu_base->lock, flags); + + if (__hrtimer_hres_active(cpu_base)) + expires = __hrtimer_get_next_event(cpu_base, exclude); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); @@ -1328,7 +1372,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) __hrtimer_run_queues(cpu_base, now); /* Reevaluate the clock bases for the next expiry */ - expires_next = __hrtimer_get_next_event(cpu_base); + expires_next = __hrtimer_get_next_event(cpu_base, NULL); /* * Store the new expiry value so the migration code can verify * against it. diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 99e03bec68e4cbaa3ba103f3097eeb1e5d38e3ee..9de770228cb0ec0197cc5df75b44d55ac760e553 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -31,7 +31,7 @@ /* USER_HZ period (usecs): */ -unsigned long tick_usec = TICK_USEC; +unsigned long tick_usec = USER_TICK_USEC; /* SHIFTED_HZ period (nsecs): */ unsigned long tick_nsec; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index dc7792f71602ae55cc66d86ff116f7385e15e671..26152997d1b39490c65fbc708c1c552dff5de82c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -569,14 +569,11 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) sched_clock_idle_wakeup_event(); } -static ktime_t tick_nohz_start_idle(struct tick_sched *ts) +static void tick_nohz_start_idle(struct tick_sched *ts) { - ktime_t now = ktime_get(); - - ts->idle_entrytime = now; + ts->idle_entrytime = ktime_get(); ts->idle_active = 1; sched_clock_idle_sleep_event(); - return now; } /** @@ -685,13 +682,10 @@ static inline bool local_timer_softirq_pending(void) return local_softirq_pending() & BIT(TIMER_SOFTIRQ); } -static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, - ktime_t now, int cpu) +static ktime_t 
tick_nohz_next_event(struct tick_sched *ts, int cpu) { - struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); u64 basemono, next_tick, next_tmr, next_rcu, delta, expires; unsigned long seq, basejiff; - ktime_t tick; /* Read jiffies and the time when jiffies were updated last */ do { @@ -700,6 +694,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, basejiff = jiffies; } while (read_seqretry(&jiffies_lock, seq)); ts->last_jiffies = basejiff; + ts->timer_expires_base = basemono; /* * Keep the periodic tick, when RCU, architecture or irq_work @@ -744,32 +739,20 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, * next period, so no point in stopping it either, bail. */ if (!ts->tick_stopped) { - tick = 0; + ts->timer_expires = 0; goto out; } } /* - * If this CPU is the one which updates jiffies, then give up - * the assignment and let it be taken by the CPU which runs - * the tick timer next, which might be this CPU as well. If we - * don't drop this here the jiffies might be stale and - * do_timer() never invoked. Keep track of the fact that it - * was the one which had the do_timer() duty last. If this CPU - * is the one which had the do_timer() duty last, we limit the - * sleep time to the timekeeping max_deferment value. + * If this CPU is the one which had the do_timer() duty last, we limit + * the sleep time to the timekeeping max_deferment value. * Otherwise we can sleep as long as we want. 
*/ delta = timekeeping_max_deferment(); - if (cpu == tick_do_timer_cpu) { - tick_do_timer_cpu = TICK_DO_TIMER_NONE; - ts->do_timer_last = 1; - } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { + if (cpu != tick_do_timer_cpu && + (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last)) delta = KTIME_MAX; - ts->do_timer_last = 0; - } else if (!ts->do_timer_last) { - delta = KTIME_MAX; - } #ifdef CONFIG_NO_HZ_FULL /* Limit the tick delta to the maximum scheduler deferment */ @@ -783,14 +766,42 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, else expires = KTIME_MAX; - expires = min_t(u64, expires, next_tick); - tick = expires; + ts->timer_expires = min_t(u64, expires, next_tick); + +out: + return ts->timer_expires; +} + +static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) +{ + struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); + u64 basemono = ts->timer_expires_base; + u64 expires = ts->timer_expires; + ktime_t tick = expires; + + /* Make sure we won't be trying to stop it twice in a row. */ + ts->timer_expires_base = 0; + + /* + * If this CPU is the one which updates jiffies, then give up + * the assignment and let it be taken by the CPU which runs + * the tick timer next, which might be this CPU as well. If we + * don't drop this here the jiffies might be stale and + * do_timer() never invoked. Keep track of the fact that it + * was the one which had the do_timer() duty last. 
+ */ + if (cpu == tick_do_timer_cpu) { + tick_do_timer_cpu = TICK_DO_TIMER_NONE; + ts->do_timer_last = 1; + } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { + ts->do_timer_last = 0; + } /* Skip reprogram of event if its not changed */ if (ts->tick_stopped && (expires == ts->next_tick)) { /* Sanity check: make sure clockevent is actually programmed */ if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) - goto out; + return; WARN_ON_ONCE(1); printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n", @@ -823,7 +834,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, if (unlikely(expires == KTIME_MAX)) { if (ts->nohz_mode == NOHZ_MODE_HIGHRES) hrtimer_cancel(&ts->sched_timer); - goto out; + return; } if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { @@ -832,15 +843,22 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, hrtimer_set_expires(&ts->sched_timer, tick); tick_program_event(tick, 1); } +} -out: - /* - * Update the estimated sleep length until the next timer - * (not only the tick). 
- */ - ts->sleep_length = ktime_sub(dev->next_event, now); - return tick; +static void tick_nohz_retain_tick(struct tick_sched *ts) +{ + ts->timer_expires_base = 0; +} + +#ifdef CONFIG_NO_HZ_FULL +static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu) +{ + if (tick_nohz_next_event(ts, cpu)) + tick_nohz_stop_tick(ts, cpu); + else + tick_nohz_retain_tick(ts); } +#endif /* CONFIG_NO_HZ_FULL */ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { @@ -877,7 +895,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts) return; if (can_stop_full_tick(cpu, ts)) - tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); + tick_nohz_stop_sched_tick(ts, cpu); else if (ts->tick_stopped) tick_nohz_restart_sched_tick(ts, ktime_get()); #endif @@ -903,10 +921,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; } - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { - ts->sleep_length = NSEC_PER_SEC / HZ; + if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) return false; - } if (need_resched()) return false; @@ -941,46 +957,70 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return true; } -static void __tick_nohz_idle_enter(struct tick_sched *ts) +static void __tick_nohz_idle_stop_tick(struct tick_sched *ts) { - ktime_t now, expires; + ktime_t expires; int cpu = smp_processor_id(); #ifdef CONFIG_SMP if (check_pending_deferrable_timers(cpu)) raise_softirq_irqoff(TIMER_SOFTIRQ); #endif - now = tick_nohz_start_idle(ts); - if (can_stop_idle_tick(cpu, ts)) { + /* + * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the + * tick timer expiration time is known already. 
+ */ + if (ts->timer_expires_base) + expires = ts->timer_expires; + else if (can_stop_idle_tick(cpu, ts)) + expires = tick_nohz_next_event(ts, cpu); + else + return; + + ts->idle_calls++; + + if (expires > 0LL) { int was_stopped = ts->tick_stopped; - ts->idle_calls++; + tick_nohz_stop_tick(ts, cpu); - expires = tick_nohz_stop_sched_tick(ts, now, cpu); - if (expires > 0LL) { - ts->idle_sleeps++; - ts->idle_expires = expires; - } + ts->idle_sleeps++; + ts->idle_expires = expires; if (!was_stopped && ts->tick_stopped) { ts->idle_jiffies = ts->last_jiffies; nohz_balance_enter_idle(cpu); } + } else { + tick_nohz_retain_tick(ts); } } /** - * tick_nohz_idle_enter - stop the idle tick from the idle task + * tick_nohz_idle_stop_tick - stop the idle tick from the idle task * * When the next event is more than a tick into the future, stop the idle tick - * Called when we start the idle loop. - * - * The arch is responsible of calling: + */ +void tick_nohz_idle_stop_tick(void) +{ + __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched)); +} + +void tick_nohz_idle_retain_tick(void) +{ + tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); + /* + * Undo the effect of get_next_timer_interrupt() called from + * tick_nohz_next_event(). + */ + timer_clear_idle(); +} + +/** + * tick_nohz_idle_enter - prepare for entering idle on the current CPU * - * - rcu_idle_enter() after its last use of RCU before the CPU is put - * to sleep. - * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. + * Called when we start the idle loop. */ void tick_nohz_idle_enter(void) { @@ -990,7 +1030,7 @@ void tick_nohz_idle_enter(void) /* * Update the idle state in the scheduler domain hierarchy - * when tick_nohz_stop_sched_tick() is called from the idle loop. + * when tick_nohz_stop_tick() is called from the idle loop. * State will be updated to busy during the first busy tick after * exiting idle. 
*/ @@ -999,8 +1039,11 @@ void tick_nohz_idle_enter(void) local_irq_disable(); ts = this_cpu_ptr(&tick_cpu_sched); + + WARN_ON_ONCE(ts->timer_expires_base); + ts->inidle = 1; - __tick_nohz_idle_enter(ts); + tick_nohz_start_idle(ts); local_irq_enable(); } @@ -1018,21 +1061,62 @@ void tick_nohz_irq_exit(void) struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); if (ts->inidle) - __tick_nohz_idle_enter(ts); + tick_nohz_start_idle(ts); else tick_nohz_full_update_tick(ts); } /** - * tick_nohz_get_sleep_length - return the length of the current sleep + * tick_nohz_idle_got_tick - Check whether or not the tick handler has run + */ +bool tick_nohz_idle_got_tick(void) +{ + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + + if (ts->inidle > 1) { + ts->inidle = 1; + return true; + } + return false; +} + +/** + * tick_nohz_get_sleep_length - return the expected length of the current sleep + * @delta_next: duration until the next event if the tick cannot be stopped * * Called from power state control code with interrupts disabled */ -ktime_t tick_nohz_get_sleep_length(void) +ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { + struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + int cpu = smp_processor_id(); + /* + * The idle entry time is expected to be a sufficient approximation of + * the current time at this point. + */ + ktime_t now = ts->idle_entrytime; + ktime_t next_event; + + WARN_ON_ONCE(!ts->inidle); + + *delta_next = ktime_sub(dev->next_event, now); - return ts->sleep_length; + if (!can_stop_idle_tick(cpu, ts)) + return *delta_next; + + next_event = tick_nohz_next_event(ts, cpu); + if (!next_event) + return *delta_next; + + /* + * If the next highres timer to expire is earlier than next_event, the + * idle governor needs to know that. 
+ */ + next_event = min_t(u64, next_event, + hrtimer_next_event_without(&ts->sched_timer)); + + return ktime_sub(next_event, now); } /** @@ -1081,6 +1165,20 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts) #endif } +static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now) +{ + tick_nohz_restart_sched_tick(ts, now); + tick_nohz_account_idle_ticks(ts); +} + +void tick_nohz_idle_restart_tick(void) +{ + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + + if (ts->tick_stopped) + __tick_nohz_idle_restart_tick(ts, ktime_get()); +} + /** * tick_nohz_idle_exit - restart the idle tick from the idle task * @@ -1096,6 +1194,7 @@ void tick_nohz_idle_exit(void) local_irq_disable(); WARN_ON_ONCE(!ts->inidle); + WARN_ON_ONCE(ts->timer_expires_base); ts->inidle = 0; @@ -1105,10 +1204,8 @@ void tick_nohz_idle_exit(void) if (ts->idle_active) tick_nohz_stop_idle(ts, now); - if (ts->tick_stopped) { - tick_nohz_restart_sched_tick(ts, now); - tick_nohz_account_idle_ticks(ts); - } + if (ts->tick_stopped) + __tick_nohz_idle_restart_tick(ts, now); local_irq_enable(); } @@ -1122,6 +1219,9 @@ static void tick_nohz_handler(struct clock_event_device *dev) struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); + if (ts->inidle) + ts->inidle = 2; + dev->next_event = KTIME_MAX; tick_sched_do_timer(now); @@ -1230,6 +1330,9 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); + if (ts->inidle) + ts->inidle = 2; + tick_sched_do_timer(now); /* diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 954b43dbf21cb7f64f31bb6a849a0654a63620a5..2b845f2c44b1b3c72ed827d9bf8542beb8af46f7 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -38,7 +38,8 @@ enum tick_nohz_mode { * @idle_exittime: Time when the idle state was left * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped * @iowait_sleeptime: Sum of the time 
slept in idle with sched tick stopped, with IO outstanding - * @sleep_length: Duration of the current idle sleep + * @timer_expires: Anticipated timer expiration time (in case sched tick is stopped) + * @timer_expires_base: Base time clock monotonic for @timer_expires * @do_timer_lst: CPU was the last one doing do_timer before going idle */ struct tick_sched { @@ -58,8 +59,9 @@ struct tick_sched { ktime_t idle_exittime; ktime_t idle_sleeptime; ktime_t iowait_sleeptime; - ktime_t sleep_length; unsigned long last_jiffies; + u64 timer_expires; + u64 timer_expires_base; u64 next_timer; ktime_t idle_expires; int do_timer_last; diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c index fb84c6c9b28ee1a49389578be161cf0bc050968a..3bb23b40fe262ed9a831864167422e0bd6ceab2e 100644 --- a/kernel/trace/ipc_logging.c +++ b/kernel/trace/ipc_logging.c @@ -903,6 +903,7 @@ void ipc_log_context_free(struct kref *kref) int ipc_log_context_destroy(void *ctxt) { struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt; + struct dfunc_info *df_info = NULL, *tmp = NULL; unsigned long flags; if (!ilctxt) @@ -913,6 +914,10 @@ int ipc_log_context_destroy(void *ctxt) spin_lock(&ilctxt->context_lock_lhb1); ilctxt->destroyed = true; complete_all(&ilctxt->read_avail); + list_for_each_entry_safe(df_info, tmp, &ilctxt->dfunc_info_list, list) { + list_del(&df_info->list); + kfree(df_info); + } spin_unlock(&ilctxt->context_lock_lhb1); write_lock_irqsave(&context_list_lock_lha1, flags); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8de773161495ac40493cb7998e2c559e962738ec..9bb72d012b8029696faf5da0a9c52b286c94408f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2945,6 +2945,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { + len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); + stm_log(OST_ENTITY_TRACE_PRINTK, 
tbuffer, len+1); + __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 40f399a8eda3e60462323a6fa15797c4a4a3b9ce..d87c2fa56ed5ea7be0f80886537bd5225c20de7f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -133,6 +133,18 @@ config DYNAMIC_DEBUG See Documentation/admin-guide/dynamic-debug-howto.rst for additional information. +config DEBUG_MODULE_LOAD_INFO + bool "Use prints for module info under a debug flag" + help + If you say Y here the resulting kernel image will include + debug prints which was kept under DEBUG_MODULE_LOAD_INFO. + This will be used by developer to debug loadable modules in + the kernel. + Say Y here only if you plan to debug the kernel. + + If unsure, say N. + + endmenu # "printk and dmesg options" menu "Compile-time checks and compiler options" diff --git a/lib/Makefile b/lib/Makefile index b8f2c16fccaa9f4b24b81df833e72f5594776081..dbacf0c95d33e63be32d3024fd7718a6581ebf9c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -34,6 +34,7 @@ lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o obj-y += lockref.o +KASAN_SANITIZE_find_bit.o := n obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ diff --git a/lib/ioremap.c b/lib/ioremap.c index 54e5bbaa3200317534926e65982dcd3cbab71492..517f5853ffed1726a462543f902450ddb7f9a9b4 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, if (ioremap_pmd_enabled() && ((next - addr) == PMD_SIZE) && IS_ALIGNED(phys_addr + addr, PMD_SIZE) && - pmd_free_pte_page(pmd)) { + pmd_free_pte_page(pmd, addr)) { if (pmd_set_huge(pmd, phys_addr + addr, prot)) continue; } @@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, if (ioremap_pud_enabled() && ((next - 
addr) == PUD_SIZE) && IS_ALIGNED(phys_addr + addr, PUD_SIZE) && - pud_free_pmd_page(pud)) { + pud_free_pmd_page(pud, addr)) { if (pud_set_huge(pud, phys_addr + addr, prot)) continue; } diff --git a/mm/memory.c b/mm/memory.c index b41012b9f5c2122bf6719bf18357e9cb4ccd8c03..f416225e15b04d6235f3f4f33e439f8737ed2d8f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1903,6 +1903,9 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; + if (!pfn_modify_allowed(pfn, pgprot)) + return -EACCES; + track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, @@ -1924,6 +1927,9 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, track_pfn_insert(vma, &pgprot, pfn); + if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) + return -EACCES; + /* * If we don't have pte special, then we have to use the pfn_valid() * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* @@ -1971,6 +1977,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, { pte_t *pte; spinlock_t *ptl; + int err = 0; pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) @@ -1978,12 +1985,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, arch_enter_lazy_mmu_mode(); do { BUG_ON(!pte_none(*pte)); + if (!pfn_modify_allowed(pfn, prot)) { + err = -EACCES; + break; + } set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); - return 0; + return err; } static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, @@ -1992,6 +2003,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, { pmd_t *pmd; unsigned long next; + int err; pfn -= addr >> PAGE_SHIFT; pmd = pmd_alloc(mm, pud, addr); @@ -2000,9 +2012,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, 
pud_t *pud, VM_BUG_ON(pmd_trans_huge(*pmd)); do { next = pmd_addr_end(addr, end); - if (remap_pte_range(mm, pmd, addr, next, - pfn + (addr >> PAGE_SHIFT), prot)) - return -ENOMEM; + err = remap_pte_range(mm, pmd, addr, next, + pfn + (addr >> PAGE_SHIFT), prot); + if (err) + return err; } while (pmd++, addr = next, addr != end); return 0; } @@ -2013,6 +2026,7 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, { pud_t *pud; unsigned long next; + int err; pfn -= addr >> PAGE_SHIFT; pud = pud_alloc(mm, p4d, addr); @@ -2020,9 +2034,10 @@ static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, return -ENOMEM; do { next = pud_addr_end(addr, end); - if (remap_pmd_range(mm, pud, addr, next, - pfn + (addr >> PAGE_SHIFT), prot)) - return -ENOMEM; + err = remap_pmd_range(mm, pud, addr, next, + pfn + (addr >> PAGE_SHIFT), prot); + if (err) + return err; } while (pud++, addr = next, addr != end); return 0; } @@ -2033,6 +2048,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, { p4d_t *p4d; unsigned long next; + int err; pfn -= addr >> PAGE_SHIFT; p4d = p4d_alloc(mm, pgd, addr); @@ -2040,9 +2056,10 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, return -ENOMEM; do { next = p4d_addr_end(addr, end); - if (remap_pud_range(mm, p4d, addr, next, - pfn + (addr >> PAGE_SHIFT), prot)) - return -ENOMEM; + err = remap_pud_range(mm, p4d, addr, next, + pfn + (addr >> PAGE_SHIFT), prot); + if (err) + return err; } while (p4d++, addr = next, addr != end); return 0; } @@ -2271,7 +2288,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long end = addr + size; int err; - if (WARN_ON(addr >= end)) + if (WARN_ON(addr >= end - 1)) return -EINVAL; pgd = pgd_offset(mm, addr); diff --git a/mm/mmap.c b/mm/mmap.c index ccde2f4135163ebc92ee047d0eb8ff8b6a2b46eb..e81d20746e95f613e94f29bb48876c1b9cca68a8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3138,9 +3138,7 @@ void exit_mmap(struct mm_struct *mm) * 
which clears VM_LOCKED, otherwise the oom reaper cannot * reliably test it. */ - mutex_lock(&oom_lock); __oom_reap_task_mm(mm); - mutex_unlock(&oom_lock); set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); diff --git a/mm/mprotect.c b/mm/mprotect.c index 44d3a9c8f0e7866ff7b69a9fa3ee13371c59c016..964e5ad695ad0fc391c15d60ca3669a0a375589d 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -292,6 +292,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, return pages; } +static int prot_none_pte_entry(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + 0 : -EACCES; +} + +static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk) +{ + return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + 0 : -EACCES; +} + +static int prot_none_test(unsigned long addr, unsigned long next, + struct mm_walk *walk) +{ + return 0; +} + +static int prot_none_walk(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long newflags) +{ + pgprot_t new_pgprot = vm_get_page_prot(newflags); + struct mm_walk prot_none_walk = { + .pte_entry = prot_none_pte_entry, + .hugetlb_entry = prot_none_hugetlb_entry, + .test_walk = prot_none_test, + .mm = current->mm, + .private = &new_pgprot, + }; + + return walk_page_range(start, end, &prot_none_walk); +} + int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) @@ -309,6 +345,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, return 0; } + /* + * Do PROT_NONE PFN permission checks here when we can still + * bail out without undoing a lot of state. This is a rather + * uncommon case, so doesn't need to be very optimized. 
+ */ + if (arch_has_pfn_modify_check() && + (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && + (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) { + error = prot_none_walk(vma, start, end, newflags); + if (error) + return error; + } + /* * If we make a private mapping writable we increase our commit; * but (without finer accounting) cannot reduce our commit if we diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3a3a5cf9c4c92c291f5f4600278c9d80afcc91ff..a5d7a8b986749a04b2e28d7ff674ef6f3cc358a9 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -504,28 +504,9 @@ void __oom_reap_task_mm(struct mm_struct *mm) static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { - bool ret = true; - - /* - * We have to make sure to not race with the victim exit path - * and cause premature new oom victim selection: - * oom_reap_task_mm exit_mm - * mmget_not_zero - * mmput - * atomic_dec_and_test - * exit_oom_victim - * [...] - * out_of_memory - * select_bad_process - * # no TIF_MEMDIE task selects new victim - * unmap_page_range # frees some memory - */ - mutex_lock(&oom_lock); - if (!down_read_trylock(&mm->mmap_sem)) { - ret = false; trace_skip_task_reaping(tsk->pid); - goto unlock_oom; + return false; } /* @@ -540,7 +521,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) if (mm_has_notifiers(mm)) { up_read(&mm->mmap_sem); schedule_timeout_idle(HZ); - goto unlock_oom; + return true; } /* @@ -552,7 +533,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) if (test_bit(MMF_OOM_SKIP, &mm->flags)) { up_read(&mm->mmap_sem); trace_skip_task_reaping(tsk->pid); - goto unlock_oom; + return true; } trace_start_task_reaping(tsk->pid); @@ -567,9 +548,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) up_read(&mm->mmap_sem); trace_finish_task_reaping(tsk->pid); -unlock_oom: - mutex_unlock(&oom_lock); - return ret; + return true; } #define MAX_OOM_REAP_RETRIES 10 diff --git a/mm/rmap.c b/mm/rmap.c 
index ebb118bf0bcca711cd18a339d2d62a65be2e0bd3..ac2e6ed8c1956a7fc9fffba67cae44cb3c91dadf 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1723,6 +1723,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, unsigned long address = vma_address(page, rwc->target_vma); rwc->rmap_one(page, rwc->target_vma, address, rwc->arg); + return; } if (locked) { diff --git a/mm/swapfile.c b/mm/swapfile.c index 2b2de9e273fab84ca484f3de8daf591bbd0b0f46..9c4ff1ac482bf2ed9ae8ce204d6917af0f774b73 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2930,6 +2930,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) return 0; } + +/* + * Find out how many pages are allowed for a single swap device. There + * are two limiting factors: + * 1) the number of bits for the swap offset in the swp_entry_t type, and + * 2) the number of bits in the swap pte, as defined by the different + * architectures. + * + * In order to find the largest possible bit mask, a swap entry with + * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, + * decoded to a swp_entry_t again, and finally the swap offset is + * extracted. + * + * This will mask all the bits from the initial ~0UL mask that can't + * be encoded in either the swp_entry_t or the architecture definition + * of a swap pte. + */ +unsigned long generic_max_swapfile_size(void) +{ + return swp_offset(pte_to_swp_entry( + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; +} + +/* Can be overridden by an architecture for additional checks. */ +__weak unsigned long max_swapfile_size(void) +{ + return generic_max_swapfile_size(); +} + static unsigned long read_swap_header(struct swap_info_struct *p, union swap_header *swap_header, struct inode *inode) @@ -2965,22 +2994,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p, p->cluster_next = 1; p->cluster_nr = 0; - /* - * Find out how many pages are allowed for a single swap - * device. 
There are two limiting factors: 1) the number - * of bits for the swap offset in the swp_entry_t type, and - * 2) the number of bits in the swap pte as defined by the - * different architectures. In order to find the - * largest possible bit mask, a swap entry with swap type 0 - * and swap offset ~0UL is created, encoded to a swap pte, - * decoded to a swp_entry_t again, and finally the swap - * offset is extracted. This will mask all the bits from - * the initial ~0UL mask that can't be encoded in either - * the swp_entry_t or the architecture definition of a - * swap pte. - */ - maxpages = swp_offset(pte_to_swp_entry( - swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; + maxpages = max_swapfile_size(); last_page = swap_header->info.last_page; if (!last_page) { pr_warn("Empty swap-file\n"); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 795e920a3281939f8f84e76f9f3fc6161146a558..81fe3949c158819444808407f68cf2bf32494a8d 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -393,7 +393,8 @@ static void sco_sock_cleanup_listen(struct sock *parent) */ static void sco_sock_kill(struct sock *sk) { - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %p state %d", sk, sk->sk_state); diff --git a/net/core/dev.c b/net/core/dev.c index 65609a7aa9f6f19417c521ca957d32ce34a9b3a3..a9b58052b01d0bd48d65d2b44f58a2c4521fc25b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4326,6 +4326,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev, #endif /* CONFIG_NETFILTER_INGRESS */ return 0; } +int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu __read_mostly; +EXPORT_SYMBOL(embms_tm_multicast_recv); + +int (*athrs_fast_nat_recv)(struct sk_buff *skb, + struct packet_type *pt_temp) __rcu __read_mostly; +EXPORT_SYMBOL(athrs_fast_nat_recv); static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) { @@ -4335,6 +4341,8 @@ 
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) bool deliver_exact = false; int ret = NET_RX_DROP; __be16 type; + int (*embms_recv)(struct sk_buff *skb); + int (*fast_recv)(struct sk_buff *skb, struct packet_type *pt_temp); net_timestamp_check(!netdev_tstamp_prequeue, skb); @@ -4391,7 +4399,19 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) } #endif skb_reset_tc(skb); + embms_recv = rcu_dereference(embms_tm_multicast_recv); + if (embms_recv) + embms_recv(skb); + skip_classify: + fast_recv = rcu_dereference(athrs_fast_nat_recv); + if (fast_recv) { + if (fast_recv(skb, pt_prev)) { + ret = NET_RX_SUCCESS; + goto out; + } + } + if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) goto drop; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca2d6cfb4cc95f66303ea5c482da2c664bb0a393..a2bfbeec4bdd9344914deb21c1f7bafe8a13f52e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -58,6 +58,7 @@ static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, struct net_device *dev); +static unsigned int neigh_probe_enable; #ifdef CONFIG_PROC_FS static const struct file_operations neigh_stat_seq_fops; #endif @@ -984,7 +985,11 @@ static void neigh_timer_handler(unsigned long arg) if (!mod_timer(&neigh->timer, next)) neigh_hold(neigh); } - if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { + + if (neigh_probe_enable) { + if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE | NUD_STALE)) + neigh_probe(neigh); + } else if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { neigh_probe(neigh); } else { out: @@ -1308,9 +1313,20 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl, { struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len); - if (neigh) - neigh_update(neigh, lladdr, NUD_STALE, - NEIGH_UPDATE_F_OVERRIDE, 0); + if (neigh) { + if (neigh_probe_enable) { + if (!(neigh->nud_state == 
NUD_REACHABLE)) { + neigh_update(neigh, lladdr, NUD_STALE, + NEIGH_UPDATE_F_OVERRIDE, 0); + write_lock(&neigh->lock); + neigh_probe(neigh); + neigh_update_notify(neigh, 0); + } + } else { + neigh_update(neigh, lladdr, NUD_STALE, + NEIGH_UPDATE_F_OVERRIDE, 0); + } + } return neigh; } EXPORT_SYMBOL(neigh_event_ns); @@ -3165,6 +3181,12 @@ static struct neigh_sysctl_table { .extra2 = &int_max, .proc_handler = proc_dointvec_minmax, }, + [NEIGH_VAR_PROBE] = { + .procname = "neigh_probe", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, {}, }, }; @@ -3200,6 +3222,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; + t->neigh_vars[NEIGH_VAR_PROBE].data = &neigh_probe_enable; } if (handler) { diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 3887bc115762688e80288609af1a7ed76f11f540..fc31c02d616ccc6c0b46fbc23249f90cd1369ca2 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -228,14 +228,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); u32 cwnd = hc->tx_cwnd, restart_cwnd, iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache); + s32 delta = now - hc->tx_lsndtime; hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2)); /* don't reduce cwnd below the initial window (IW) */ restart_cwnd = min(cwnd, iwnd); - cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto; - hc->tx_cwnd = max(cwnd, restart_cwnd); + while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd) + cwnd >>= 1; + hc->tx_cwnd = max(cwnd, restart_cwnd); hc->tx_cwnd_stamp = now; hc->tx_cwnd_used = 0; diff --git a/net/embms_kernel/Makefile b/net/embms_kernel/Makefile new file mode 100644 index 
0000000000000000000000000000000000000000..c21480eb8a82e42b66f1b430dae9f585df1b768a --- /dev/null +++ b/net/embms_kernel/Makefile @@ -0,0 +1,22 @@ +# +# Makefile for Embms Kernel module. +# + +KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build + +obj-m += embms_kernel.o + +ccflags-y += -D__CHECK_ENDIAN__ + +CDEFINES += -D__CHECK_ENDIAN__ + +KBUILD_CPPFLAGS += $(CDEFINES) + +all: + $(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install + +clean: + $(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean + diff --git a/net/embms_kernel/embms_kernel.c b/net/embms_kernel/embms_kernel.c new file mode 100644 index 0000000000000000000000000000000000000000..e905d25fb7015cf0a23d6fef527bf42bf1d96e61 --- /dev/null +++ b/net/embms_kernel/embms_kernel.c @@ -0,0 +1,1051 @@ +/************************************************************************* + * ----------------------------------------------------------------------- + * Copyright (c) 2013-2015, 2017, 2018 The Linux Foundation. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * ----------------------------------------------------------------------- + + * DESCRIPTION + * Main file for eMBMs Tunneling Module in kernel. 
+ ************************************************************************* + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "embms_kernel.h" + +struct embms_info_internal embms_conf; + +/* Global structures used for tunneling. These include + * iphdr and udphdr which are appended to skbs for + * tunneling, net_device and tunnleing related + * structs and params + */ + +unsigned char hdr_buff[sizeof(struct iphdr) + sizeof(struct udphdr)]; +struct iphdr *iph_global; +struct udphdr *udph_global; +struct net_device *dev_global; + +static struct tmgi_to_clnt_info tmgi_to_clnt_map_tbl; + +/* handle_multicast_stream - packet forwarding + * function for multicast stream + * Main use case is for EMBMS Over Softap feature + */ + +static int handle_multicast_stream(struct sk_buff *skb) +{ + struct iphdr *iph; + struct udphdr *udph; + struct in_device *in_dev; + unsigned char *tmp_ptr = NULL; + struct sk_buff *skb_new = NULL; + struct sk_buff *skb_cpy = NULL; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *temp_tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + int hdr_size = sizeof(*udph) + sizeof(*iph) + ETH_HLEN; + + /* only IP packets */ + if (htons(ETH_P_IP) != skb->protocol) { + embms_error("Not an IP packet\n"); + return 0; + } + + if (embms_conf.embms_tunneling_status == TUNNELING_OFF) { + embms_debug("Tunneling Disabled. Can't process packets\n"); + return 0; + } + + if (unlikely(memcmp(skb->dev->name, embms_conf.embms_iface, + strlen(embms_conf.embms_iface)) != 0)) { + embms_error("Packet received on %s iface. 
NOT an EMBMS Iface\n", + skb->dev->name); + return 0; + } + + /* Check if dst ip of packet is same as multicast ip of any tmgi*/ + + iph = (struct iphdr *)skb->data; + udph = (struct udphdr *)(skb->data + sizeof(struct iphdr)); + + spin_lock_bh(&embms_conf.lock); + + list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr, + &tmgi_to_clnt_map_tbl.tmgi_list_ptr) { + temp_tmgi = list_entry(tmgi_entry_ptr, + struct tmgi_to_clnt_info, + tmgi_list_ptr); + + if (temp_tmgi->tmgi_multicast_addr == iph->daddr && + temp_tmgi->tmgi_port == udph->dest) + break; + } + + if (tmgi_entry_ptr == &tmgi_to_clnt_map_tbl.tmgi_list_ptr) { + embms_error("%s:", __func__); + embms_error("could not find matchin tmgi entry\n"); + spin_unlock_bh(&embms_conf.lock); + return 0; + } + + /* Found a matching tmgi entry. Realloc headroom to + * accommodate new Ethernet, IP and UDP header + */ + + skb_new = skb_realloc_headroom(skb, hdr_size); + if (unlikely(!skb_new)) { + embms_error("Can't allocate headroom\n"); + spin_unlock_bh(&embms_conf.lock); + return 0; + } + + /* push skb->data and copy IP and UDP headers*/ + + tmp_ptr = skb_push(skb_new, + sizeof(struct udphdr) + sizeof(struct iphdr)); + + iph = (struct iphdr *)tmp_ptr; + udph = (struct udphdr *)(tmp_ptr + sizeof(struct iphdr)); + + memcpy(tmp_ptr, hdr_buff, hdr_size - ETH_HLEN); + udph->len = htons(skb_new->len - sizeof(struct iphdr)); + iph->tot_len = htons(skb_new->len); + + list_for_each_safe(clnt_ptr, prev_clnt_ptr, + &temp_tmgi->client_list_head) { + temp_client = list_entry(clnt_ptr, + struct clnt_info, + client_list_ptr); + + /* Make a copy of skb_new with new IP and UDP header. + * We can't use skb_new or its clone here since we need to + * constantly change dst ip and dst port which is not possible + * for shared memory as is the case with skb_new. 
+ */ + + skb_cpy = skb_copy(skb_new, GFP_ATOMIC); + if (unlikely(!skb_cpy)) { + embms_error("Can't copy skb\n"); + kfree_skb(skb_new); + return 0; + } + + iph = (struct iphdr *)skb_cpy->data; + udph = (struct udphdr *)(skb_cpy->data + sizeof(struct iphdr)); + + iph->id = htons(atomic_inc_return(&embms_conf.ip_ident)); + + /* Calculate checksum for new IP and UDP header*/ + + udph->dest = temp_client->port; + skb_cpy->csum = csum_partial((char *)udph, + ntohs(udph->len), + skb_cpy->csum); + + iph->daddr = temp_client->addr; + ip_send_check(iph); + + udph->check = 0; + udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + ntohs(udph->len), + IPPROTO_UDP, + skb_cpy->csum); + + if (udph->check == 0) + udph->check = CSUM_MANGLED_0; + + if (unlikely(!dev_global)) { + embms_error("Global device NULL\n"); + kfree_skb(skb_cpy); + kfree_skb(skb_new); + return 0; + } + + /* update device info and add MAC header*/ + + skb_cpy->dev = dev_global; + + skb_cpy->dev->header_ops->create(skb_cpy, skb_cpy->dev, + ETH_P_IP, temp_client->dmac, + NULL, skb_cpy->len); + dev_queue_xmit(skb_cpy); + } + + spin_unlock_bh(&embms_conf.lock); + kfree_skb(skb_new); + return 1; +} + +static int check_embms_device(atomic_t *use_count) +{ + int ret; + + if (atomic_inc_return(use_count) == 1) { + ret = 0; + } else { + atomic_dec(use_count); + ret = -EBUSY; + } + return ret; +} + +static int embms_device_open(struct inode *inode, struct file *file) +{ + /*Check if the device is busy*/ + if (check_embms_device(&embms_conf.device_under_use)) { + embms_error("embms_tm_open : EMBMS device busy\n"); + return -EBUSY; + } + + try_module_get(THIS_MODULE); + return SUCCESS; +} + +static int embms_device_release(struct inode *inode, struct file *file) +{ + /* Reduce device use count before leaving*/ + embms_debug("Releasing EMBMS device..\n"); + atomic_dec(&embms_conf.device_under_use); + embms_conf.embms_tunneling_status = TUNNELING_OFF; + module_put(THIS_MODULE); + return SUCCESS; +} + +static struct 
tmgi_to_clnt_info *check_for_tmgi_entry(u32 addr, + u16 port) +{ + struct list_head *tmgi_ptr, *prev_tmgi_ptr; + struct tmgi_to_clnt_info *temp_tmgi = NULL; + + embms_debug("%s: mcast addr :%pI4, port %u\n", + __func__, &addr, ntohs(port)); + + list_for_each_safe(tmgi_ptr, + prev_tmgi_ptr, + &tmgi_to_clnt_map_tbl.tmgi_list_ptr) { + temp_tmgi = list_entry(tmgi_ptr, + struct tmgi_to_clnt_info, + tmgi_list_ptr); + + if (temp_tmgi->tmgi_multicast_addr == addr && + temp_tmgi->tmgi_port == port) { + embms_debug("%s:TMGI entry found\n", __func__); + return temp_tmgi; + } + } + return NULL; +} + +static struct clnt_info *chk_clnt_entry(struct tmgi_to_clnt_info *tmgi, + struct tmgi_to_clnt_info_update *clnt) +{ + struct list_head *clnt_ptr, *prev_clnt_ptr; + struct clnt_info *temp_client = NULL; + + embms_debug("check_for_client_entry: clnt addr :%pI4, port %u\n", + &clnt->client_addr, ntohs(clnt->client_port)); + + list_for_each_safe(clnt_ptr, + prev_clnt_ptr, + &tmgi->client_list_head) { + temp_client = list_entry(clnt_ptr, + struct clnt_info, + client_list_ptr); + if (temp_client->addr == clnt->client_addr && + temp_client->port == clnt->client_port) { + embms_debug("Clnt entry present\n"); + return temp_client; + } + } + return NULL; +} + +static int add_new_tmgi_entry(struct tmgi_to_clnt_info_update *info_update, + struct clnt_info *clnt) +{ + struct tmgi_to_clnt_info *new_tmgi = NULL; + + embms_debug("%s:Enter\n", __func__); + + new_tmgi = kzalloc(sizeof(*new_tmgi), + GFP_ATOMIC); + if (!new_tmgi) { + embms_error("%s: mem alloc failed\n", __func__); + return -ENOMEM; + } + + memset(new_tmgi, 0, sizeof(struct tmgi_to_clnt_info)); + + new_tmgi->tmgi_multicast_addr = info_update->multicast_addr; + new_tmgi->tmgi_port = info_update->multicast_port; + + embms_debug("%s:", __func__); + embms_debug("New tmgi multicast addr :%pI4 , port %u\n", + &info_update->multicast_addr, + ntohs(info_update->multicast_port)); + + embms_debug("%s:Adding client entry\n", __func__); + + 
spin_lock_bh(&embms_conf.lock); + + INIT_LIST_HEAD(&new_tmgi->client_list_head); + list_add(&clnt->client_list_ptr, + &new_tmgi->client_list_head); + new_tmgi->no_of_clients++; + + /* Once above steps are done successfully, + * we add tmgi entry to our local table + */ + + list_add(&new_tmgi->tmgi_list_ptr, + &tmgi_to_clnt_map_tbl.tmgi_list_ptr); + embms_conf.no_of_tmgi_sessions++; + + spin_unlock_bh(&embms_conf.lock); + + return SUCCESS; +} + +static void print_tmgi_to_client_table(void) +{ + int i, j; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *temp_tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + + embms_debug("====================================================\n"); + embms_debug("Printing TMGI to Client Table :\n"); + embms_debug("No of Active TMGIs : %d\n", + embms_conf.no_of_tmgi_sessions); + embms_debug("====================================================\n\n"); + + if (embms_conf.no_of_tmgi_sessions > 0) { + i = 1; + list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr, + &tmgi_to_clnt_map_tbl.tmgi_list_ptr) { + temp_tmgi = list_entry(tmgi_entry_ptr, + struct tmgi_to_clnt_info, + tmgi_list_ptr); + + embms_debug("TMGI entry %d :\n", i); + embms_debug("TMGI multicast addr : %pI4 , port %u\n\n", + &temp_tmgi->tmgi_multicast_addr, + ntohs(temp_tmgi->tmgi_port)); + embms_debug("No of clients : %d\n", + temp_tmgi->no_of_clients); + j = 1; + + list_for_each_safe(clnt_ptr, prev_clnt_ptr, + &temp_tmgi->client_list_head) { + temp_client = list_entry(clnt_ptr, + struct clnt_info, + client_list_ptr); + embms_debug("Client entry %d :\n", j); + embms_debug("client addr : %pI4 , port %u\n\n", + &temp_client->addr, + ntohs(temp_client->port)); + j++; + } + i++; + embms_debug("===========================================\n\n"); + } + } else { + embms_debug("No TMGI entries to Display\n"); + } + 
embms_debug("==================================================================\n\n"); +} + +/** + * delete_tmgi_entry_from_table() - deletes tmgi from global tmgi-client table + * @buffer: Buffer containing TMGI info for deletion. + * + * This function completely removes the TMGI from + * global TMGI-client table, along with the client list + * so that no packets for this TMGI are processed + * + * Return: Success on deleting TMGI entry, error otherwise. + */ + +int delete_tmgi_entry_from_table(char *buffer) +{ + int i; + struct tmgi_to_clnt_info_update *info_update; + char message_buffer[sizeof(struct tmgi_to_clnt_info_update)]; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *temp_tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + + embms_debug("%s: Enter\n", __func__); + + info_update = (struct tmgi_to_clnt_info_update *)buffer; + + if (!info_update) { + embms_error("%s:", __func__); + embms_error("NULL arguments passed\n"); + return -EBADPARAM; + } + + /* This function is used to delete a specific TMGI entry + * when that particular TMGI goes down + * Search for the TMGI entry in our local table + */ + if (embms_conf.no_of_tmgi_sessions == 0) { + embms_error("TMGI count 0. 
Nothing to delete\n"); + return SUCCESS; + } + + temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr, + info_update->multicast_port); + + if (!temp_tmgi) { + /* TMGI entry was not found in our local table*/ + embms_error("%s :", __func__); + embms_error("Desired TMGI entry not found\n"); + return -EBADPARAM; + } + + spin_lock_bh(&embms_conf.lock); + + /* We need to free memory allocated to client entries + * for a particular TMGI entry + */ + + list_for_each_safe(clnt_ptr, prev_clnt_ptr, + &temp_tmgi->client_list_head) { + temp_client = list_entry(clnt_ptr, + struct clnt_info, + client_list_ptr); + embms_debug("%s :", __func__); + embms_debug("Client addr to delete :%pI4 , port %u\n", + &temp_client->addr, ntohs(temp_client->port)); + list_del(&temp_client->client_list_ptr); + temp_tmgi->no_of_clients--; + kfree(temp_client); + } + + /* Free memory allocated to tmgi entry*/ + + list_del(&temp_tmgi->tmgi_list_ptr); + kfree(temp_tmgi); + embms_conf.no_of_tmgi_sessions--; + + spin_unlock_bh(&embms_conf.lock); + + embms_debug("%s : TMGI Entry deleted.\n", __func__); + + return SUCCESS; +} + +/** + * delete_client_entry_from_all_tmgi() - deletes client from all tmgi lists + * @buffer: Buffer containing client info for deletion. + * + * This function completely removes a client from + * all TMGIs in global TMGI-client table. Also delets TMGI + * entries if no more clients are there + * + * Return: Success on deleting client entry, error otherwise. + */ +int delete_client_entry_from_all_tmgi(char *buffer) +{ + int i; + struct tmgi_to_clnt_info_update *info_update; + char message_buffer[sizeof(struct tmgi_to_clnt_info_update)]; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + + /* We use this function when we want to delete any + * client entry from all TMGI entries. 
This scenario + * happens when any client disconnects and hence + * we need to clean all realted client entries + * in our mapping table + */ + + embms_debug("del_clnt_from_all_tmgi: Enter\n"); + + info_update = (struct tmgi_to_clnt_info_update *)buffer; + + if (!info_update) { + embms_error("del_clnt_from_all_tmgi:"); + embms_error("NULL arguments passed\n"); + return -EBADPARAM; + } + + /* We start checking from first TMGI entry and if client + * entry is found in client entries of any TMGI, we clean + * up that client entry from that TMGI entry + */ + if (embms_conf.no_of_tmgi_sessions == 0) + return SUCCESS; + + list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr, + &tmgi_to_clnt_map_tbl.tmgi_list_ptr) { + tmgi = list_entry(tmgi_entry_ptr, + struct tmgi_to_clnt_info, + tmgi_list_ptr); + + temp_client = chk_clnt_entry(tmgi, info_update); + if (!temp_client) + continue; + + spin_lock_bh(&embms_conf.lock); + + list_del(&temp_client->client_list_ptr); + tmgi->no_of_clients--; + kfree(temp_client); + + spin_unlock_bh(&embms_conf.lock); + + temp_client = NULL; + + if (tmgi->no_of_clients == 0) { + /* Deleted clnt was the only clnt for + * that TMGI we need to delete TMGI + * entry from table + */ + embms_debug("del_clnt_from_all_tmgi:"); + embms_debug("Deleted client was "); + embms_debug("last client for tmgi\n"); + embms_debug("del_clnt_from_all_tmgi:"); + embms_debug("Delting tmgi as it has "); + embms_debug("zero clients.TMGI IP "); + embms_debug(":%pI4 , port %u\n", + &tmgi->tmgi_multicast_addr, + ntohs(tmgi->tmgi_port)); + + spin_lock_bh(&embms_conf.lock); + + list_del(&tmgi->tmgi_list_ptr); + embms_conf.no_of_tmgi_sessions--; + kfree(tmgi); + + spin_unlock_bh(&embms_conf.lock); + + embms_debug("del_clnt_from_all_tmgi:"); + embms_debug("TMGI entry deleted\n"); + } + } + + embms_debug("del_clnt_from_all_tmgi Successful\n"); + return SUCCESS; +} + +/** + * add_client_entry_to_table() - add client entry to specified TMGI + * @buffer: Buffer containing client 
info for addition. + * + * This function adds a client to the specified TMGI in + * the global TMGI-client table. If TMGI entry is not + * present, it adds a new TMGI entry and adds client + * entry to it. + * + * Return: Success on adding client entry, error otherwise. + */ +int add_client_entry_to_table(char *buffer) +{ + int i, ret; + struct tmgi_to_clnt_info_update *info_update; + char message_buffer[sizeof(struct tmgi_to_clnt_info_update)]; + struct clnt_info *new_client = NULL; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *new_tmgi = NULL; + struct tmgi_to_clnt_info *tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + struct neighbour *neigh_entry; + struct in_device *iface_dev; + struct in_ifaddr *iface_info; + + embms_debug("%s: Enter\n", __func__); + + info_update = (struct tmgi_to_clnt_info_update *)buffer; + + if (!info_update) { + embms_error("%s:", __func__); + embms_error("NULL arguments passed\n"); + return -EBADPARAM; + } + + new_client = kzalloc(sizeof(*new_client), GFP_ATOMIC); + if (!new_client) { + embms_error("%s:", __func__); + embms_error("Cannot allocate memory\n"); + return -ENOMEM; + } + + new_client->addr = info_update->client_addr; + new_client->port = info_update->client_port; + + neigh_entry = __ipv4_neigh_lookup(dev_global, + (u32)(new_client->addr)); + if (!neigh_entry) { + embms_error("%s :", __func__); + embms_error("Can't find neighbour entry\n"); + kfree(new_client); + return -EBADPARAM; + } + + ether_addr_copy(new_client->dmac, neigh_entry->ha); + + embms_debug("DMAC of client : %pM\n", new_client->dmac); + + embms_debug("%s:", __func__); + embms_debug("New client addr :%pI4 , port %u\n", + &info_update->client_addr, + ntohs(info_update->client_port)); + + if (embms_conf.no_of_tmgi_sessions == 0) { + /* TMGI Client mapping table is empty. 
+ * First client entry is being added + */ + + embms_debug("tmgi_to_clnt_map_tbl is empty\n"); + + ret = add_new_tmgi_entry(info_update, new_client); + if (ret != SUCCESS) { + kfree(new_client); + new_client = NULL; + } + + goto exit_add; + } + + /* In this case, table already has some entries + * and we need to search for the specific tmgi entry + * for which client entry is to be added + */ + + tmgi = check_for_tmgi_entry(info_update->multicast_addr, + info_update->multicast_port); + if (tmgi) { + if (chk_clnt_entry(tmgi, info_update)) { + kfree(new_client); + return -ENOEFFECT; + } + + /* Adding client to the client list + * for the specified TMGI + */ + + spin_lock_bh(&embms_conf.lock); + + list_add(&new_client->client_list_ptr, + &tmgi->client_list_head); + tmgi->no_of_clients++; + + spin_unlock_bh(&embms_conf.lock); + + ret = SUCCESS; + } else { + /* TMGI specified in the message was not found in + * mapping table.Hence, we need to add a new entry + * for this TMGI and add the specified client to the client + * list + */ + + embms_debug("TMGI entry not present. Adding tmgi entry\n"); + + ret = add_new_tmgi_entry(info_update, new_client); + if (ret != SUCCESS) { + kfree(new_client); + new_client = NULL; + } + } + +exit_add: + return ret; +} + +/** + * delete_client_entry_from_table() - delete client entry from specified TMGI + * @buffer: Buffer containing client info for deletion. + * + * This function deletes a client from the specified TMGI in + * the global TMGI-client table. If this was the last client + * entry, it also deletes the TMGI entry. + * + * Return: Success on deleting client entry, error otherwise. 
+ */ +int delete_client_entry_from_table(char *buffer) +{ + int i; + struct tmgi_to_clnt_info_update *info_update; + char message_buffer[sizeof(struct tmgi_to_clnt_info_update)]; + struct clnt_info *temp_client = NULL; + struct tmgi_to_clnt_info *temp_tmgi = NULL; + struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr; + struct list_head *clnt_ptr, *prev_clnt_ptr; + + embms_debug("%s: Enter\n", __func__); + + info_update = (struct tmgi_to_clnt_info_update *)buffer; + + if (!info_update) { + embms_error("%s:", __func__); + embms_error("NULL arguments passed\n"); + return -EBADPARAM; + } + + /* Search for the TMGI entry*/ + if (embms_conf.no_of_tmgi_sessions == 0) + return SUCCESS; + + temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr, + info_update->multicast_port); + + if (!temp_tmgi) { + embms_error("%s:TMGI not found\n", __func__); + return -EBADPARAM; + } + /* Delete client entry for a specific tmgi*/ + + embms_debug("%s:clnt addr :%pI4,port %u\n", + __func__, &info_update->client_addr, + ntohs(info_update->client_port)); + + temp_client = chk_clnt_entry(temp_tmgi, info_update); + + if (!temp_client) { + /* Specified client entry was not found in client list + * of specified TMGI + */ + embms_error("%s:Clnt not found\n", __func__); + return -EBADPARAM; + } + + spin_lock_bh(&embms_conf.lock); + + list_del(&temp_client->client_list_ptr); + temp_tmgi->no_of_clients--; + + spin_unlock_bh(&embms_conf.lock); + + kfree(temp_client); + temp_client = NULL; + + embms_debug("%s:Client entry deleted\n, __func__"); + + if (temp_tmgi->no_of_clients == 0) { + /* If deleted client was the only client for that TMGI + * we need to delete TMGI entry from table + */ + embms_debug("%s:", __func__); + embms_debug("Deleted client was the last client for tmgi\n"); + embms_debug("%s:", __func__); + embms_debug("Deleting tmgi since it has zero clients\n"); + + spin_lock_bh(&embms_conf.lock); + + list_del(&temp_tmgi->tmgi_list_ptr); + embms_conf.no_of_tmgi_sessions--; + 
kfree(temp_tmgi); + + spin_unlock_bh(&embms_conf.lock); + + embms_debug("%s: TMGI deleted\n", __func__); + } + + if (embms_conf.no_of_tmgi_sessions == 0) + embms_conf.embms_tunneling_status = TUNNELING_OFF; + + return SUCCESS; +} + +/** + * embms_device_ioctl() - handle IOCTL calls to device + * @file: File descriptor of file opened from userspace process + * @ioctl_num: IOCTL to use + * @ioctl_param: IOCTL parameters/arguments + * + * This function is called whenever a process tries to do + * an ioctl on our device file. As per the IOCTL number, + * it calls various functions to manipulate global + * TMGI-client table + * + * Return: Success if functoin call returns SUCCESS, error otherwise. + */ + +long embms_device_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + int i, error; + long ret; + char *temp; + char buffer[BUF_LEN]; + struct in_device *iface_dev; + struct in_ifaddr *iface_info; + struct tmgi_to_clnt_info_update *info_update; + char __user *argp = (char __user *)ioctl_param; + + memset(buffer, 0, BUF_LEN); + + /* Switch according to the ioctl called*/ + switch (ioctl_num) { + case ADD_EMBMS_TUNNEL: + if (copy_from_user(buffer, argp, + sizeof(struct tmgi_to_clnt_info_update))) + return -EFAULT; + + ret = add_client_entry_to_table(buffer); + print_tmgi_to_client_table(); + break; + + case DEL_EMBMS_TUNNEL: + if (copy_from_user(buffer, argp, + sizeof(struct tmgi_to_clnt_info_update))) + return -EFAULT; + + ret = delete_client_entry_from_table(buffer); + print_tmgi_to_client_table(); + break; + + case TMGI_DEACTIVATE: + if (copy_from_user(buffer, argp, + sizeof(struct tmgi_to_clnt_info_update))) + return -EFAULT; + + ret = delete_tmgi_entry_from_table(buffer); + print_tmgi_to_client_table(); + break; + + case CLIENT_DEACTIVATE: + if (copy_from_user(buffer, argp, + sizeof(struct tmgi_to_clnt_info_update))) + return -EFAULT; + + ret = delete_client_entry_from_all_tmgi(buffer); + print_tmgi_to_client_table(); + break; + + case 
GET_EMBMS_TUNNELING_STATUS: + /* This ioctl is both input (ioctl_param) and + * output (the return value of this function) + */ + embms_debug("Sending tunneling status : %d\n", + embms_conf.embms_tunneling_status); + ret = embms_conf.embms_tunneling_status; + break; + + case START_EMBMS_TUNNEL: + + if (copy_from_user(buffer, argp, + sizeof(struct tmgi_to_clnt_info_update))) + return -EFAULT; + + info_update = (struct tmgi_to_clnt_info_update *)buffer; + embms_conf.embms_data_port = info_update->data_port; + udph_global->source = embms_conf.embms_data_port; + + memset(embms_conf.embms_iface, 0, EMBMS_MAX_IFACE_NAME); + memcpy(embms_conf.embms_iface, info_update->iface_name, + EMBMS_MAX_IFACE_NAME); + + embms_conf.embms_tunneling_status = TUNNELING_ON; + embms_debug("Starting Tunneling. Embms_data_port = %d\n", + ntohs(embms_conf.embms_data_port)); + embms_debug("Embms Data Iface = %s\n", embms_conf.embms_iface); + ret = SUCCESS; + + /*Initialise dev_global to bridge device*/ + dev_global = __dev_get_by_name(&init_net, BRIDGE_IFACE); + if (!dev_global) { + embms_error("Error in getting device info\n"); + ret = FAILURE; + } else { + iface_dev = (struct in_device *)dev_global->ip_ptr; + iface_info = iface_dev->ifa_list; + while (iface_info) { + if (memcmp(iface_info->ifa_label, + BRIDGE_IFACE, + strlen(BRIDGE_IFACE)) == 0) + break; + + iface_info = iface_info->ifa_next; + } + if (iface_info) { + embms_debug("IP address of %s iface is %pI4\n", + BRIDGE_IFACE, + &iface_info->ifa_address); + /*Populate source addr for header*/ + iph_global->saddr = iface_info->ifa_address; + ret = SUCCESS; + } else { + embms_debug("Could not find iface address\n"); + ret = FAILURE; + } + } + + break; + + case STOP_EMBMS_TUNNEL: + + embms_conf.embms_tunneling_status = TUNNELING_OFF; + embms_debug("Stopped Tunneling..\n"); + ret = SUCCESS; + break; + } + + return ret; +} + +/* Module Declarations + * This structure will hold the functions to be called + * when a process does something to 
the device we + * created. Since a pointer to this structure is kept in + * the devices table, it can't be local to + * init_module. NULL is for unimplemented functions. + */ +static const struct file_operations embms_device_fops = { + .owner = THIS_MODULE, + .open = embms_device_open, + .release = embms_device_release, + .read = NULL, + .write = NULL, + .unlocked_ioctl = embms_device_ioctl, +}; + +static int embms_ioctl_init(void) +{ + int ret; + struct device *dev; + + ret = alloc_chrdev_region(&device, 0, dev_num, EMBMS_DEVICE_NAME); + if (ret) { + embms_error("device_alloc err\n"); + goto dev_alloc_err; + } + + embms_class = class_create(THIS_MODULE, EMBMS_DEVICE_NAME); + if (IS_ERR(embms_class)) { + embms_error("class_create err\n"); + goto class_err; + } + + dev = device_create(embms_class, NULL, device, + &embms_conf, EMBMS_DEVICE_NAME); + if (IS_ERR(dev)) { + embms_error("device_create err\n"); + goto device_err; + } + + cdev_init(&embms_device, &embms_device_fops); + ret = cdev_add(&embms_device, device, dev_num); + if (ret) { + embms_error("cdev_add err\n"); + goto cdev_add_err; + } + + embms_debug("ioctl init OK!!\n"); + return 0; + +cdev_add_err: + device_destroy(embms_class, device); +device_err: + class_destroy(embms_class); +class_err: + unregister_chrdev_region(device, dev_num); +dev_alloc_err: + return -ENODEV; +} + +static void embms_ioctl_deinit(void) +{ + cdev_del(&embms_device); + device_destroy(embms_class, device); + class_destroy(embms_class); + unregister_chrdev_region(device, dev_num); +} + +/*Initialize the module - Register the misc device*/ +static int __init start_embms(void) +{ + int ret = 0; + + iph_global = (struct iphdr *)hdr_buff; + udph_global = (struct udphdr *)(hdr_buff + sizeof(struct iphdr)); + + embms_conf.embms_tunneling_status = TUNNELING_OFF; + embms_conf.no_of_tmgi_sessions = 0; + embms_conf.embms_data_port = 0; + atomic_set(&embms_conf.device_under_use, 0); + atomic_set(&embms_conf.ip_ident, 0); + 
spin_lock_init(&embms_conf.lock); + + embms_debug("Registering embms device\n"); + + ret = embms_ioctl_init(); + if (ret) { + embms_error("embms device failed to register"); + goto fail_init; + } + + INIT_LIST_HEAD(&tmgi_to_clnt_map_tbl.tmgi_list_ptr); + + memset(hdr_buff, 0, sizeof(struct udphdr) + sizeof(struct iphdr)); + udph_global->check = UDP_CHECKSUM; + iph_global->version = IP_VERSION; + iph_global->ihl = IP_IHL; + iph_global->tos = IP_TOS; + iph_global->frag_off = IP_FRAG_OFFSET; + iph_global->ttl = IP_TTL; + iph_global->protocol = IPPROTO_UDP; + + dev_global = NULL; + + if (!embms_tm_multicast_recv) + RCU_INIT_POINTER(embms_tm_multicast_recv, + handle_multicast_stream); + + return ret; + +fail_init: + embms_ioctl_deinit(); + return ret; +} + +/*Cleanup - unregister the appropriate file from proc*/ + +static void __exit stop_embms(void) +{ + embms_ioctl_deinit(); + + if (rcu_dereference(embms_tm_multicast_recv)) + RCU_INIT_POINTER(embms_tm_multicast_recv, NULL); + + embms_debug("unregister_chrdev done\n"); +} + +module_init(start_embms); +module_exit(stop_embms); +MODULE_LICENSE("GPL v2"); diff --git a/net/embms_kernel/embms_kernel.h b/net/embms_kernel/embms_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..f809d49e6f5d7278dcddbbb472678ccba7601215 --- /dev/null +++ b/net/embms_kernel/embms_kernel.h @@ -0,0 +1,233 @@ +/****************************************************************** + * Copyright (c) 2013-2015,2017, 2018 The Linux Foundation. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. 
+ *--------------------------------------------------------------- + + * DESCRIPTION + * Header file for eMBMs Tunneling Module in kernel. + ******************************************************************* + */ + +#ifndef EMBMS_H +#define EMBMS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define EMBMS_MAX_IFACE_NAME 20 + +/* Defining IP and UDP header related macros*/ + +#define UDP_CHECKSUM 0 +#define IP_VERSION 4 +#define IP_IHL 5 +#define IP_TOS 0 +#define IP_ID 1 +#define IP_FRAG_OFFSET htons(0x4000) +#define IP_TTL 64 +#define BRIDGE_IFACE "bridge0" + +#define BUF_LEN 1024 +#define TUNNELING_ON 1 +#define TUNNELING_OFF 0 + +// definitions required for IOCTL +static unsigned int dev_num = 1; +/* Embms device used for communication*/ +struct cdev embms_device; +static struct class *embms_class; +static dev_t device; +#define EMBMS_IOC_MAGIC 0x64 + +#define embms_debug pr_debug +#define embms_error pr_debug + +/* The name of the device file*/ +#define EMBMS_DEVICE_NAME "embms_tm_device" + +extern int (*embms_tm_multicast_recv)(struct sk_buff *skb); + +/** + * enum embms_action_type - Describes action to perform + * @ADD_CLIENT_ENTRY: add client entry to TMGI + * @DELETE_CLIENT_ENTRY: delete client entry from TMGI + * @TMGI_DEACTIVATE: Delete TMGI entry + * @CLIENT_ACTIVATE_ALL_TMGI: Add client to all TMGI + * @CLIENT_DEACTIVATE_ALL_TMGI: Delete client from all TMGI + * @SESSION_DEACTIVATE: Stop session + * @SOCK_INFO: Socket information like V4 addr, port etc + * + * This enum defines the types of action which are + * supported by this module. 
+ */ + +enum { + ADD_CLIENT_ENTRY = 0, + DELETE_CLIENT_ENTRY, + TMGI_DEACTIVATE, + CLIENT_ACTIVATE_ALL_TMGI, + CLIENT_DEACTIVATE_ALL_TMGI, + SESSION_DEACTIVATE, + SOCK_INFO +} embms_action_type; + +/** + * struct tmgi_to_clnt_info_update - information for addition/deletion + * @multicast_addr: TMGI multicast IP to receive data + * @multicast_port: TMGI multicast port to receive data + * @client_addr: Client IPV4 address for sending data + * @client_port: Client port for sending data + * @data_port: port used to send data to client + * @action_type: Action to be performed + * @iface_name: iface to listen to for data + * + * This structure contains information as to what action + * needs to be performed on TMGI-client table. It is + * sent as a parameter during an IOCTL call + */ + +struct tmgi_to_clnt_info_update { + u32 multicast_addr; + u16 multicast_port; + u32 client_addr; + u16 client_port; + u16 data_port; + u32 action_type; + char iface_name[EMBMS_MAX_IFACE_NAME]; +}; + +/** + * struct clnt_info - contains client information + * @addr: Client IPV4 address for sending packets + * @port: Client port for sending packets + * @dmac: Client DMAC address + * @client_list_ptr : list ptr used to maintain client list + * + * This structure maintains complete client information + * to be used when sending packets to client + */ + +struct clnt_info { + u32 addr; + u16 port; + u8 dmac[ETH_ALEN]; + struct list_head client_list_ptr; +}; + +/** + * struct tmgi_to_clnt_info - contains TMGI information + * @tmgi_multicast_addr: TMGI IPV4 address to listen for packets + * @tmgi_port: Client port to listen for packets + * @no_of_clients: No of clients for a TMGI + * @client_list_head : list head for client list + * @tmgi_list_ptr : list ptr to maintain tmgi list + * + * This structure maintains complete client information + * to be used when sending data to client + */ + +struct tmgi_to_clnt_info { + u32 tmgi_multicast_addr; + u16 tmgi_port; + u16 no_of_clients; + struct 
list_head client_list_head; + struct list_head tmgi_list_ptr; +}; + +/** + * struct embms_info_internal - stores module specific params + * @device_under_use: Used to prevent concurrent access to the same device + * @embms_data_port: Source Data port used for tunnelled packets + * @embms_iface: Iface to receive embms traffic + * @embms_tunneling_status : Current EMBMS Status + * @no_of_tmgi_sessions : Number of current active TMGI sessions + * @lock : Lock for concurrency scenarios + * @ip_ident : IP identification number to be used for sent packets + * + * This structure holds module specific information which is + * used throughout the module to maintain consistency + */ + +struct embms_info_internal { + atomic_t device_under_use; + int embms_data_port; + char embms_iface[EMBMS_MAX_IFACE_NAME]; + int embms_tunneling_status; + int no_of_tmgi_sessions; + /*lock to prevent concurrent access*/ + spinlock_t lock; + atomic_t ip_ident; +}; + +/* This ioctl is used to add a new client entry to tunneling module. + * Entry params are populated in the struct used for ioctl + */ + +#define ADD_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 0, \ + struct tmgi_to_clnt_info_update) + +/* This ioctl is used to delete a client entry for a particular + * TMGI from tunneling module. + * Entry params are populated in the struct used for ioctl + */ + +#define DEL_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 1, \ + struct tmgi_to_clnt_info_update) + +/* This ioctl is used to delete a TMGI entry completely + * from tunneling module. + * Entry params are populated in the struct used for ioctl + */ + +#define TMGI_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 2, \ + struct tmgi_to_clnt_info_update) + +/* This ioctl is used to delete client entry completely + * from tunneling module. 
+ * Entry params are populated in the struct used for ioctl + */ + +#define CLIENT_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 3, \ + struct tmgi_to_clnt_info_update) + +/* Gets the ON/OFF status of Tunneling module*/ + +#define GET_EMBMS_TUNNELING_STATUS _IO(EMBMS_IOC_MAGIC, 4) + +/* Used to start tunneling. Argument is the port + * number to be used to send + * data to clients + */ + +#define START_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 5, \ + struct tmgi_to_clnt_info_update) + +/* Used to stop tunnleing*/ + +#define STOP_EMBMS_TUNNEL _IO(EMBMS_IOC_MAGIC, 6) + +/* Return values indicating error status*/ +#define SUCCESS 0 /* Successful operation*/ +#define FAILURE -1 /* Unsuccessful operation*/ + +/* Error Condition Values*/ +#define ENOMEM -2 /* Out of memory*/ +#define EBADPARAM -3 /* Incorrect parameters passed*/ +#define ENOEFFECT -4 /* No Effect*/ + +#endif + diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 2171324cdb5c95667d01a2edb98651c09128f569..4c47a47c66bd3dfad64099de217278d78db1abea 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -45,6 +45,10 @@ static int tcp_syn_retries_min = 1; static int tcp_syn_retries_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; +static int tcp_delack_seg_min = TCP_DELACK_MIN; +static int tcp_delack_seg_max = 60; +static int tcp_use_userconfig_min; +static int tcp_use_userconfig_max = 1; /* obsolete */ static int sysctl_tcp_low_latency __read_mostly; @@ -784,6 +788,25 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &one }, + { + .procname = "tcp_delack_seg", + .data = &sysctl_tcp_delack_seg, + .maxlen = sizeof(sysctl_tcp_delack_seg), + .mode = 0644, + .proc_handler = tcp_proc_delayed_ack_control, + .extra1 = &tcp_delack_seg_min, + .extra2 = &tcp_delack_seg_max, + }, + { + .procname = "tcp_use_userconfig", + .data = &sysctl_tcp_use_userconfig, + .maxlen = 
sizeof(sysctl_tcp_use_userconfig), + .mode = 0644, + .proc_handler = tcp_use_userconfig_sysctl_handler, + .extra1 = &tcp_use_userconfig_min, + .extra2 = &tcp_use_userconfig_max, + }, + { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e81ff9d545a401db0c8892ec93a45fe10e4c4996..e41ef7b318969690f26fbd6db8de9189843b2db0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -300,6 +300,12 @@ EXPORT_SYMBOL(sysctl_tcp_wmem); atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ EXPORT_SYMBOL(tcp_memory_allocated); +int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG; +EXPORT_SYMBOL(sysctl_tcp_delack_seg); + +int sysctl_tcp_use_userconfig __read_mostly; +EXPORT_SYMBOL(sysctl_tcp_use_userconfig); + /* * Current number of TCP sockets. */ @@ -1539,8 +1545,11 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied) /* Delayed ACKs frequently hit locked sockets during bulk * receive. */ if (icsk->icsk_ack.blocked || - /* Once-per-two-segments ACK was not sent by tcp_input.c */ - tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || + /* Once-per-sysctl_tcp_delack_seg segments + * ACK was not sent by tcp_input.c + */ + tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) * + sysctl_tcp_delack_seg || /* * If this read emptied read buffer, we send ACK, if * connection is not bidirectional, user drained diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9466531062039d0242ece30654a6a8ff2277cbf0..b9430d373620cf52a3781584957d5c70aa2f7b5c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5122,7 +5122,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ - if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && + if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) * + sysctl_tcp_delack_seg && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... 
*/ diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index a845b7692c1b0d0cce8f68345db8d4f3fa812c68..f657bc31b26c2302994a72eb8dcb855ba266b6bf 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -24,13 +24,45 @@ int sysctl_tcp_thin_linear_timeouts __read_mostly; +static void set_tcp_default(void) +{ + sysctl_tcp_delack_seg = TCP_DELACK_SEG; +} + +/*sysctl handler for tcp_ack realted master control */ +int tcp_proc_delayed_ack_control(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, length, ppos); + + /* The ret value will be 0 if the input validation is successful + * and the values are written to sysctl table. If not, the stack + * will continue to work with currently configured values + */ + return ret; +} + +/*sysctl handler for tcp_ack realted master control */ +int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, length, ppos); + + if (write && ret == 0) { + if (!sysctl_tcp_use_userconfig) + set_tcp_default(); + } + return ret; +} + /** * tcp_write_err() - close socket and save error info * @sk: The socket the error has appeared on. * * Returns: Nothing (void) */ - static void tcp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 84ee2eb88121aa3a5add4004aba875adf7f58010..ee8dbd228fe2b82a307f31e769cdb6f03cb55fa7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1134,12 +1134,8 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, max_headroom += 8; mtu -= 8; } - if (skb->protocol == htons(ETH_P_IPV6)) { - if (mtu < IPV6_MIN_MTU) - mtu = IPV6_MIN_MTU; - } else if (mtu < 576) { - mtu = 576; - } + mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ? 
+ IPV6_MIN_MTU : IPV4_MIN_MTU); skb_dst_update_pmtu(skb, mtu); if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 316869df91e8e33335dfb66afc3f90306efbc31e..5c87f1d3e525c5478b18217f1d949bffd79b7a02 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1211,7 +1211,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len /* Get routing info from the tunnel socket */ skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); + skb_dst_set(skb, sk_dst_check(sk, 0)); inet = inet_sk(sk); fl = &inet->cork.fl; diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 89041260784c0871195556074f7138eee01edb33..260b3dc1b4a2ab4545982b0b36478d6a6cf0b71f 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value) rcu_read_lock_bh(); sap = __llc_sap_find(sap_value); - if (sap) - llc_sap_hold(sap); + if (!sap || !llc_sap_hold_safe(sap)) + sap = NULL; rcu_read_unlock_bh(); return sap; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5a76c71f718ca90c84387896ac6f40268d529ada..8242e17440596fa2b440ccb03166d34dbcfc9fa4 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -406,11 +406,15 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl) } EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); +void (*delete_sfe_entry)(struct nf_conn *ct) __rcu __read_mostly; +EXPORT_SYMBOL(delete_sfe_entry); + static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; const struct nf_conntrack_l4proto *l4proto; + void (*delete_entry)(struct nf_conn *ct); pr_debug("destroy_conntrack(%pK)\n", ct); WARN_ON(atomic_read(&nfct->use) != 0); @@ -419,6 +423,13 @@ destroy_conntrack(struct nf_conntrack *nfct) nf_ct_tmpl_free(ct); return; } + + if (ct->sfe_entry) { + delete_entry = rcu_dereference(delete_sfe_entry); + 
if (delete_entry) + delete_entry(ct); + } + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->destroy) l4proto->destroy(ct); diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 5523acce9d6993dc71e467bbdf7b718ab2b25bf7..b637e37bb85f35224f1b3ef452c30fa378d5e64f 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,18 @@ static unsigned int max_dcc_channels = 8; static unsigned int dcc_timeout __read_mostly = 300; /* This is slow, but it's simple. --RR */ static char *irc_buffer; +struct irc_client_info { + char *nickname; + bool conn_to_server; + int nickname_len; + __be32 server_ip; + __be32 client_ip; + struct list_head ptr; + }; + +static struct irc_client_info client_list; + +static unsigned int no_of_clients; static DEFINE_SPINLOCK(irc_buffer_lock); unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, @@ -61,7 +74,7 @@ static const char *const dccprotos[] = { }; #define MINMATCHLEN 5 - +#define MINLENNICK 1 /* tries to get the ip_addr and port out of a dcc command * return value: -1 on failure, 0 on success * data pointer to first byte of DCC command data @@ -71,6 +84,23 @@ static const char *const dccprotos[] = { * ad_beg_p returns pointer to first byte of addr data * ad_end_p returns pointer to last byte of addr data */ +static struct irc_client_info *search_client_by_ip +( + struct nf_conntrack_tuple *tuple +) +{ + struct irc_client_info *temp, *ret = NULL; + struct list_head *obj_ptr, *prev_obj_ptr; + + list_for_each_safe(obj_ptr, prev_obj_ptr, &client_list.ptr) { + temp = list_entry(obj_ptr, struct irc_client_info, ptr); + if ((temp->client_ip == tuple->src.u3.ip) && + (temp->server_ip == tuple->dst.u3.ip)) + ret = temp; + } + return ret; +} + static int parse_dcc(char *data, const char *data_end, __be32 *ip, u_int16_t *port, char **ad_beg_p, char **ad_end_p) { @@ -105,6 
+135,106 @@ static int parse_dcc(char *data, const char *data_end, __be32 *ip, return 0; } +static bool mangle_ip(struct nf_conn *ct, + int dir, char *nick_start) +{ + char *nick_end; + struct nf_conntrack_tuple *tuple; + struct irc_client_info *temp; + struct list_head *obj_ptr, *prev_obj_ptr; + + tuple = &ct->tuplehash[dir].tuple; + nick_end = nick_start; + while (*nick_end != ' ') + nick_end++; + list_for_each_safe(obj_ptr, prev_obj_ptr, + &client_list.ptr) { + temp = list_entry(obj_ptr, + struct irc_client_info, ptr); + /*If it is an internal client, + *do not mangle the DCC Server IP + */ + if ((temp->server_ip == tuple->dst.u3.ip) && + (temp->nickname_len == (nick_end - nick_start))) { + if (memcmp(nick_start, temp->nickname, + temp->nickname_len) == 0) + return false; + } + } + return true; +} + +static int handle_nickname(struct nf_conn *ct, + int dir, char *nick_start) +{ + char *nick_end; + struct nf_conntrack_tuple *tuple; + struct irc_client_info *temp; + int i, j; + bool add_entry = true; + + nick_end = nick_start; + i = 0; + while (*nick_end != '\n') { + nick_end++; + i++; + } + tuple = &ct->tuplehash[dir].tuple; + /*Check if the entry is already + * present for that client + */ + temp = search_client_by_ip(tuple); + if (temp) { + add_entry = false; + /*Update nickname if the client is not already + * connected to the server.If the client is + * connected, wait for server to confirm + * if nickname is valid + */ + if (!temp->conn_to_server) { + kfree(temp->nickname); + temp->nickname = + kmalloc(i, GFP_ATOMIC); + if (temp->nickname) { + temp->nickname_len = i; + memcpy(temp->nickname, + nick_start, temp->nickname_len); + } else { + list_del(&temp->ptr); + no_of_clients--; + kfree(temp); + } + } + } + /*Add client entry if not already present*/ + if (add_entry) { + j = sizeof(struct irc_client_info); + temp = kmalloc(j, GFP_ATOMIC); + if (temp) { + no_of_clients++; + tuple = &ct->tuplehash[dir].tuple; + temp->nickname_len = i; + temp->nickname = + 
kmalloc(temp->nickname_len, GFP_ATOMIC); + if (!temp->nickname) { + kfree(temp); + return NF_DROP; + } + memcpy(temp->nickname, nick_start, + temp->nickname_len); + memcpy(&temp->client_ip, + &tuple->src.u3.ip, sizeof(__be32)); + memcpy(&temp->server_ip, + &tuple->dst.u3.ip, sizeof(__be32)); + temp->conn_to_server = false; + list_add(&temp->ptr, + &client_list.ptr); + } else { + return NF_DROP; + } + } + return NF_ACCEPT; +} static int help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { @@ -113,7 +243,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, const struct tcphdr *th; struct tcphdr _tcph; const char *data_limit; - char *data, *ib_ptr; + char *data, *ib_ptr, *for_print, *nick_end; int dir = CTINFO2DIR(ctinfo); struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; @@ -123,10 +253,8 @@ static int help(struct sk_buff *skb, unsigned int protoff, int i, ret = NF_ACCEPT; char *addr_beg_p, *addr_end_p; typeof(nf_nat_irc_hook) nf_nat_irc; - - /* If packet is coming from IRC server */ - if (dir == IP_CT_DIR_REPLY) - return NF_ACCEPT; + struct irc_client_info *temp; + bool mangle = true; /* Until there's been traffic both ways, don't look in packets. 
*/ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) @@ -150,79 +278,222 @@ static int help(struct sk_buff *skb, unsigned int protoff, data = ib_ptr; data_limit = ib_ptr + skb->len - dataoff; - /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 - * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ - while (data < data_limit - (19 + MINMATCHLEN)) { - if (memcmp(data, "\1DCC ", 5)) { - data++; - continue; + /* If packet is coming from IRC server + * parse the packet for different type of + * messages (MOTD,NICK etc) and process + * accordingly + */ + if (dir == IP_CT_DIR_REPLY) { + /* strlen("NICK xxxxxx") + * 5+strlen("xxxxxx")=1 (minimum length of nickname) + */ + + while (data < data_limit - 6) { + if (memcmp(data, " MOTD ", 6)) { + data++; + continue; + } + /* MOTD message signifies successful + * registration with server + */ + tuple = &ct->tuplehash[!dir].tuple; + temp = search_client_by_ip(tuple); + if (temp && !temp->conn_to_server) + temp->conn_to_server = true; + ret = NF_ACCEPT; + goto out; } - data += 5; - /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ - iph = ip_hdr(skb); - pr_debug("DCC found in master %pI4:%u %pI4:%u\n", - &iph->saddr, ntohs(th->source), - &iph->daddr, ntohs(th->dest)); - - for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { - if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) { - /* no match */ + /* strlen("NICK :xxxxxx") + * 6+strlen("xxxxxx")=1 (minimum length of nickname) + * Parsing the server reply to get nickname + * of the client + */ + data = ib_ptr; + data_limit = ib_ptr + skb->len - dataoff; + while (data < data_limit - (6 + MINLENNICK)) { + if (memcmp(data, "NICK :", 6)) { + data++; continue; } - data += strlen(dccprotos[i]); - pr_debug("DCC %s detected\n", dccprotos[i]); - - /* we have at least - * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid - * data left (== 14/13 bytes) */ - if (parse_dcc(data, data_limit, &dcc_ip, - &dcc_port, &addr_beg_p, &addr_end_p)) { - pr_debug("unable to parse dcc 
command\n"); - continue; + data += 6; + nick_end = data; + i = 0; + while ((*nick_end != 0x0d) && + (*(nick_end + 1) != '\n')) { + nick_end++; + i++; } + tuple = &ct->tuplehash[!dir].tuple; + temp = search_client_by_ip(tuple); + if (temp && temp->nickname) { + kfree(temp->nickname); + temp->nickname = kmalloc(i, GFP_ATOMIC); + if (temp->nickname) { + temp->nickname_len = i; + memcpy(temp->nickname, data, + temp->nickname_len); + temp->conn_to_server = true; + } else { + list_del(&temp->ptr); + no_of_clients--; + kfree(temp); + ret = NF_ACCEPT; + } + } + /*NICK during registration*/ + ret = NF_ACCEPT; + goto out; + } + } - pr_debug("DCC bound ip/port: %pI4:%u\n", - &dcc_ip, dcc_port); - - /* dcc_ip can be the internal OR external (NAT'ed) IP */ - tuple = &ct->tuplehash[dir].tuple; - if (tuple->src.u3.ip != dcc_ip && - tuple->dst.u3.ip != dcc_ip) { - net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", - &tuple->src.u3.ip, - &dcc_ip, dcc_port); + else{ + /*Parsing NICK command from client to create an entry + * strlen("NICK xxxxxx") + * 5+strlen("xxxxxx")=1 (minimum length of nickname) + */ + data = ib_ptr; + data_limit = ib_ptr + skb->len - dataoff; + while (data < data_limit - (5 + MINLENNICK)) { + if (memcmp(data, "NICK ", 5)) { + data++; continue; } + data += 5; + ret = handle_nickname(ct, dir, data); + goto out; + } - exp = nf_ct_expect_alloc(ct); - if (exp == NULL) { - nf_ct_helper_log(skb, ct, - "cannot alloc expectation"); - ret = NF_DROP; - goto out; + data = ib_ptr; + while (data < data_limit - 6) { + if (memcmp(data, "QUIT :", 6)) { + data++; + continue; } - tuple = &ct->tuplehash[!dir].tuple; - port = htons(dcc_port); - nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, - tuple->src.l3num, - NULL, &tuple->dst.u3, - IPPROTO_TCP, NULL, &port); - - nf_nat_irc = rcu_dereference(nf_nat_irc_hook); - if (nf_nat_irc && ct->status & IPS_NAT_MASK) - ret = nf_nat_irc(skb, ctinfo, protoff, - addr_beg_p - ib_ptr, - addr_end_p - addr_beg_p, - exp); - else if 
(nf_ct_expect_related(exp) != 0) { - nf_ct_helper_log(skb, ct, - "cannot add expectation"); - ret = NF_DROP; + /* Parsing QUIT to free the list entry + */ + tuple = &ct->tuplehash[dir].tuple; + temp = search_client_by_ip(tuple); + if (temp) { + list_del(&temp->ptr); + no_of_clients--; + kfree(temp->nickname); + kfree(temp); } - nf_ct_expect_put(exp); + ret = NF_ACCEPT; goto out; } + /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 + * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 + */ + data = ib_ptr; + while (data < data_limit - (19 + MINMATCHLEN)) { + if (memcmp(data, "\1DCC ", 5)) { + data++; + continue; + } + data += 5; + /* we have at least (19+MINMATCHLEN)-5 + *bytes valid data left + */ + iph = ip_hdr(skb); + pr_debug("DCC found in master %pI4:%u %pI4:%u\n", + &iph->saddr, ntohs(th->source), + &iph->daddr, ntohs(th->dest)); + + for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { + if (memcmp(data, dccprotos[i], + strlen(dccprotos[i]))) { + /* no match */ + continue; + } + data += strlen(dccprotos[i]); + pr_debug("DCC %s detected\n", dccprotos[i]); + + /* we have at least + * (19+MINMATCHLEN)-5-dccprotos[i].matchlen + *bytes valid data left (== 14/13 bytes) + */ + if (parse_dcc(data, data_limit, &dcc_ip, + &dcc_port, &addr_beg_p, + &addr_end_p)) { + pr_debug("unable to parse dcc command\n"); + continue; + } + + pr_debug("DCC bound ip/port: %pI4:%u\n", + &dcc_ip, dcc_port); + + /* dcc_ip can be the internal OR + *external (NAT'ed) IP + */ + tuple = &ct->tuplehash[dir].tuple; + if (tuple->src.u3.ip != dcc_ip && + tuple->dst.u3.ip != dcc_ip) { + net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", + &tuple->src.u3.ip, + &dcc_ip, dcc_port); + continue; + } + + exp = nf_ct_expect_alloc(ct); + if (!exp) { + nf_ct_helper_log(skb, ct, + "cannot alloc expectation"); + ret = NF_DROP; + goto out; + } + tuple = &ct->tuplehash[!dir].tuple; + port = htons(dcc_port); + nf_ct_expect_init(exp, + NF_CT_EXPECT_CLASS_DEFAULT, + tuple->src.l3num, + NULL, &tuple->dst.u3, + 
IPPROTO_TCP, NULL, &port); + + nf_nat_irc = rcu_dereference(nf_nat_irc_hook); + + tuple = &ct->tuplehash[dir].tuple; + for_print = ib_ptr; + /* strlen("PRIVMSG xxxx :\1DCC + *SENT t AAAAAAAA P\1\n")=26 + * 8+strlen(xxxx) = 1(min length)+7+ + *MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 + *Parsing DCC command to get client name and + *check whether it is an internal client + */ + while (for_print < + data_limit - (25 + MINMATCHLEN)) { + if (memcmp(for_print, "PRIVMSG ", 8)) { + for_print++; + continue; + } + for_print += 8; + mangle = mangle_ip(ct, + dir, for_print); + break; + } + if (mangle && + nf_nat_irc && + ct->status & IPS_NAT_MASK) + ret = nf_nat_irc(skb, ctinfo, + protoff, + addr_beg_p - ib_ptr, + addr_end_p + - addr_beg_p, + exp); + + else if (mangle && + nf_ct_expect_related(exp) + != 0) { + nf_ct_helper_log(skb, ct, + "cannot add expectation"); + ret = NF_DROP; + } + nf_ct_expect_put(exp); + goto out; + } + } } out: spin_unlock_bh(&irc_buffer_lock); @@ -272,7 +543,8 @@ static int __init nf_conntrack_irc_init(void) kfree(irc_buffer); return ret; } - + no_of_clients = 0; + INIT_LIST_HEAD(&client_list.ptr); return 0; } diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 4dbb5bad4363ba9f67fea15160bb7b0b88ae9c38..222d36ac9229f052552ee59ce153cdbc309fcdc3 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -49,13 +49,28 @@ module_param(sip_direct_signalling, int, 0600); MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar " "only (default 1)"); -static int sip_direct_media __read_mostly = 1; -module_param(sip_direct_media, int, 0600); -MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " - "endpoints only (default 1)"); - const struct nf_nat_sip_hooks *nf_nat_sip_hooks; EXPORT_SYMBOL_GPL(nf_nat_sip_hooks); +static struct ctl_table_header *sip_sysctl_header; +static unsigned int nf_ct_disable_sip_alg; +static int sip_direct_media = 1; +static 
struct ctl_table sip_sysctl_tbl[] = { + { + .procname = "nf_conntrack_disable_sip_alg", + .data = &nf_ct_disable_sip_alg, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "nf_conntrack_sip_direct_media", + .data = &sip_direct_media, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + {} +}; static int string_len(const struct nf_conn *ct, const char *dptr, const char *limit, int *shift) @@ -1459,6 +1474,8 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct, const struct nf_nat_sip_hooks *hooks; int ret; + if (nf_ct_disable_sip_alg) + return NF_ACCEPT; if (strncasecmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) ret = process_sip_request(skb, protoff, dataoff, dptr, datalen); else @@ -1619,6 +1636,15 @@ static int __init nf_conntrack_sip_init(void) int i, ret; NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sip_master)); + sip_sysctl_header = register_net_sysctl(&init_net, "net/netfilter", + sip_sysctl_tbl); + if (!sip_sysctl_header) + pr_debug("nf_ct_sip:Unable to register SIP systbl\n"); + + if (nf_ct_disable_sip_alg) + pr_debug("nf_ct_sip: SIP ALG disabled\n"); + else + pr_debug("nf_ct_sip: SIP ALG enabled\n"); if (ports_c == 0) ports[ports_c++] = SIP_PORT; diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index 791fac4fd74534e0481d409a182c2d5e5544deac..2f2414e2904bde01dabb30b98d1df02abfaac472 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -111,13 +111,26 @@ static int map_addr(struct sk_buff *skb, unsigned int protoff, newaddr = ct->tuplehash[!dir].tuple.src.u3; newport = ct_sip_info->forced_dport ? 
: ct->tuplehash[!dir].tuple.src.u.udp.port; + } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) && + ct->tuplehash[dir].tuple.src.u.udp.port != port) { + newaddr = ct->tuplehash[!dir].tuple.dst.u3; + newport = 0; + } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) && + ct->tuplehash[dir].tuple.dst.u.udp.port != port) { + newaddr = ct->tuplehash[!dir].tuple.src.u3; + newport = 0; } else return 1; if (nf_inet_addr_cmp(&newaddr, addr) && newport == port) return 1; - buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport)); + if (newport == 0) + buflen = sip_sprintf_addr(ct, buffer, &newaddr, false); + else + buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, + ntohs(newport)); + return mangle_packet(skb, protoff, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8e054c63b54e46495e3d2cc4c85f8a7652dc425a..1c5f21d660a1e4b581a8a30c5f15c2ff99b62780 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1050,16 +1050,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE) return NULL; - /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ - if ((size >> PAGE_SHIFT) + 2 > totalram_pages) - return NULL; - - /* __GFP_NORETRY is not fully supported by kvmalloc but it should - * work reasonably well if sz is too large and bail out rather - * than shoot all processes down before realizing there is nothing - * more to reclaim. 
- */ - info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY); + info = kvmalloc(sz, GFP_KERNEL_ACCOUNT); if (!info) return NULL; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index c070dfc0190aa2bd84871eee50596a3d7c735636..c92894c3e40a34b34bdf533b1712fd1c1909595e 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info, { u32 addr_len; - if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { + if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && + info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); if (addr_len != sizeof(struct in_addr) && addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index c956997f96739b0424df49901a77a285ffc5ebb0..3d484f352f5623f9effbb36a1f855368d54d660f 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1429,7 +1429,7 @@ static int qrtr_release(struct socket *sock) if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); - sock_set_flag(sk, SOCK_DEAD); + sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 3684153cd8a9e1dfc680bcabf3c13e4fba2f507e..6499aecfbfc43389965f13b638a4ec7b6bcb8d5b 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -112,6 +112,8 @@ static void mall_destroy(struct tcf_proto *tp) if (!head) return; + tcf_unbind_filter(tp, &head->res); + if (tc_should_offload(dev, head->flags)) mall_destroy_hw_filter(tp, head, (unsigned long) head); diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index a76937ee0b2dbf08f68574f727aa91d3a496fb4b..52829fdc280b34ab396d8a91a6383bb63176255c 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -464,11 +464,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, tcf_bind_filter(tp, &cr.res, base); } - if (old_r) - 
tcf_exts_change(&r->exts, &e); - else - tcf_exts_change(&cr.exts, &e); - if (old_r && old_r != r) { err = tcindex_filter_result_init(old_r); if (err < 0) { @@ -479,12 +474,15 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, oldp = p; r->res = cr.res; + tcf_exts_change(&r->exts, &e); + rcu_assign_pointer(tp->root, cp); if (r == &new_filter_result) { struct tcindex_filter *nfp; struct tcindex_filter __rcu **fp; + f->result.res = r->res; tcf_exts_change(&f->result.exts, &r->exts); fp = cp->h + (handle % cp->hash); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index dfc8c51e4d74ec378a338ab9bb2560b3811f393b..f2fd556c1233a5d9a4cdd93afd05251a64d5480e 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -449,14 +449,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode) return transport->shutdown(vsock_sk(sk), mode); } -void vsock_pending_work(struct work_struct *work) +static void vsock_pending_work(struct work_struct *work) { struct sock *sk; struct sock *listener; struct vsock_sock *vsk; bool cleanup; - vsk = container_of(work, struct vsock_sock, dwork.work); + vsk = container_of(work, struct vsock_sock, pending_work.work); sk = sk_vsock(vsk); listener = vsk->listener; cleanup = true; @@ -496,7 +496,6 @@ void vsock_pending_work(struct work_struct *work) sock_put(sk); sock_put(listener); } -EXPORT_SYMBOL_GPL(vsock_pending_work); /**** SOCKET OPERATIONS ****/ @@ -595,6 +594,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) return retval; } +static void vsock_connect_timeout(struct work_struct *work); + struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, @@ -637,6 +638,8 @@ struct sock *__vsock_create(struct net *net, vsk->sent_request = false; vsk->ignore_connecting_rst = false; vsk->peer_shutdown = 0; + INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); + INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); psk = parent ? 
vsock_sk(parent) : NULL; if (parent) { @@ -1116,7 +1119,7 @@ static void vsock_connect_timeout(struct work_struct *work) struct vsock_sock *vsk; int cancel = 0; - vsk = container_of(work, struct vsock_sock, dwork.work); + vsk = container_of(work, struct vsock_sock, connect_work.work); sk = sk_vsock(vsk); lock_sock(sk); @@ -1220,9 +1223,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, * timeout fires. */ sock_hold(sk); - INIT_DELAYED_WORK(&vsk->dwork, - vsock_connect_timeout); - schedule_delayed_work(&vsk->dwork, timeout); + schedule_delayed_work(&vsk->connect_work, timeout); /* Skip ahead to preserve error code set above. */ goto out_wait; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 10ae7823a19def7bde20d669e3913a40178e7da2..d5be519b02712aff8bbe779acd1f98f269809a36 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1091,8 +1091,7 @@ static int vmci_transport_recv_listen(struct sock *sk, vpending->listener = sk; sock_hold(sk); sock_hold(pending); - INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work); - schedule_delayed_work(&vpending->dwork, HZ); + schedule_delayed_work(&vpending->pending_work, HZ); out: return err; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c2a52658f2734c999ba993cd7549797d4382c524..477091ddf02b0a57b89602b117ffe1bdff17589a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -490,7 +490,8 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { - [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, + [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY, + .len = FILS_MAX_KEK_LEN }, [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, }; @@ -9158,6 +9159,45 @@ static int nl80211_update_connect_params(struct 
sk_buff *skb, changed |= UPDATE_ASSOC_IES; } + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && + info->attrs[NL80211_ATTR_FILS_ERP_REALM] && + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && + info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + connect.fils_erp_username = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_username_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_realm = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_realm_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_next_seq_num = + nla_get_u16( + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); + connect.fils_erp_rrk = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + connect.fils_erp_rrk_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + changed |= UPDATE_FILS_ERP_INFO; + } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || + info->attrs[NL80211_ATTR_FILS_ERP_REALM] || + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || + info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + return -EINVAL; + } + + if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { + u32 auth_type = + nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + if (!nl80211_valid_auth_type(rdev, auth_type, + NL80211_CMD_CONNECT)) + return -EINVAL; + connect.auth_type = auth_type; + changed |= UPDATE_AUTH_TYPE; + } + wdev_lock(dev->ieee80211_ptr); if (!wdev->current_bss) ret = -ENOLINK; @@ -10974,15 +11014,27 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; + if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] || + (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_STA) && + 
!tb[NL80211_REKEY_DATA_KCK])) + return -EINVAL; + if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) + if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) + if (tb[NL80211_REKEY_DATA_KCK] && + nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; + memset(&rekey_data, 0, sizeof(rekey_data)); rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); - rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); + rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]); + if (tb[NL80211_REKEY_DATA_KCK]) + rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); wdev_lock(wdev); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index dbfcfefd6d69309027c2ccf5871894dcbe5b4fa4..8b09d6985a9405afca56c4502e2591e2c1271c2b 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1435,6 +1435,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) (ut[i].family != prev_family)) return -EINVAL; + if (ut[i].mode >= XFRM_MODE_MAX) + return -EINVAL; + prev_family = ut[i].family; switch (ut[i].family) { diff --git a/scripts/depmod.sh b/scripts/depmod.sh index 9831cca31240b8b9890d701308ed53bcd62c2ccf..f41b0a4b575cee19b6eeb56dc76612fb91fec734 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -11,10 +11,16 @@ DEPMOD=$1 KERNELRELEASE=$2 SYMBOL_PREFIX=$3 -if ! test -r System.map -a -x "$DEPMOD"; then +if ! test -r System.map ; then exit 0 fi +if [ -z $(command -v $DEPMOD) ]; then + echo "'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "This is probably in the kmod package." 
>&2 + exit 1 +fi + # older versions of depmod don't support -P # support was added in module-init-tools 3.13 if test -n "$SYMBOL_PREFIX"; then diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c index f66577307573c8c8071f8e257cc38575c95f9cdb..94a7a27c77f928426aa174621b6202dce7bf4cc9 100644 --- a/security/pfe/pfk.c +++ b/security/pfe/pfk.c @@ -219,11 +219,7 @@ static struct inode *pfk_bio_get_inode(const struct bio *bio) if (!page_mapping(bio->bi_io_vec->bv_page)) return NULL; - if (!bio->bi_io_vec->bv_page->mapping->host) - - return NULL; - - return bio->bi_io_vec->bv_page->mapping->host; + return page_mapping(bio->bi_io_vec->bv_page)->host; } /** diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c index 6452b42201366b0f3a68349ad6e082e215919d8a..509a7b47d16d61ecd63d01ccc1ac6c4fc80851fb 100644 --- a/security/pfe/pfk_ice.c +++ b/security/pfe/pfk_ice.c @@ -58,38 +58,98 @@ #define CONTEXT_SIZE 0x1000 #define KEYMASTER_UTILS_CMD_ID 0x200UL +#define KEYMASTER_GET_VERSION (KEYMASTER_UTILS_CMD_ID + 0UL) #define KEYMASTER_SET_ICE_KEY (KEYMASTER_UTILS_CMD_ID + 18UL) #define KEYMASTER_CLEAR_ICE_KEY (KEYMASTER_UTILS_CMD_ID + 19UL) +#define USE_KM_MAJOR_VERSION 4 +#define USE_KM_MINOR_VERSION 513 + #define ICE_KEY_SIZE 32 #define ICE_SALT_SIZE 32 +static uint32_t keymaster_minor_version; +static uint32_t keymaster_major_version; + static uint8_t ice_key[ICE_KEY_SIZE]; static uint8_t ice_salt[ICE_KEY_SIZE]; static struct qseecom_handle *qhandle; -static int set_wrapped_key(uint32_t index, const uint8_t *key, - const uint8_t *salt) +static uint32_t get_keymaster_version(struct qseecom_handle *qhandle) { int ret = 0; - u32 set_req_len = 0; - u32 set_rsp_len = 0; - struct pfk_ice_key_req *set_req_buf; - struct pfk_ice_key_rsp *set_rsp_buf; + struct pfk_km_get_version_req *req; + struct pfk_km_get_version_rsp *rsp; - memcpy(ice_key, key, sizeof(ice_key)); - memcpy(ice_salt, salt, sizeof(ice_salt)); + req = (struct pfk_km_get_version_req *) qhandle->sbuf; + req->cmd_id = 
KEYMASTER_GET_VERSION; + + rsp = (struct pfk_km_get_version_rsp *) (qhandle->sbuf + + sizeof(struct pfk_km_get_version_req)); + + ret = qseecom_send_command(qhandle, + req, sizeof(struct pfk_km_get_version_req), + rsp, sizeof(struct pfk_km_get_version_rsp)); + + if (ret) { + pr_err("%s: Get KM version error: Status %d\n", __func__, + rsp->status); + return ret; + } + + keymaster_major_version = rsp->ta_major_version; + keymaster_minor_version = rsp->ta_minor_version; + + return ret; +} +/* + * This change is to make sure there are no issues when pfk calls into + * keymaster for unwrapping keys before setting them. This can happen when + * an older version of keymaster is used which means the unwrapping logic + * cannot be done in trustzone. + */ + +static bool should_use_keymaster(void) +{ + int ret = -1; if (!qhandle) { ret = qseecom_start_app(&qhandle, "keymaster64", CONTEXT_SIZE); if (ret) { pr_err("Qseecom start app failed\n"); - return ret; + return false; } } + if (keymaster_major_version == 0 || keymaster_minor_version == 0) { + ret = get_keymaster_version(qhandle); + if (ret) { + pr_err("Error in getting keymaster version\n"); + return false; + } + } + + if (keymaster_major_version == USE_KM_MAJOR_VERSION && + keymaster_minor_version < USE_KM_MINOR_VERSION) + return true; + else + return false; +} + +static int set_wrapped_key(uint32_t index, const uint8_t *key, + const uint8_t *salt) +{ + int ret = 0; + u32 set_req_len = 0; + u32 set_rsp_len = 0; + struct pfk_ice_key_req *set_req_buf; + struct pfk_ice_key_rsp *set_rsp_buf; + + memcpy(ice_key, key, sizeof(ice_key)); + memcpy(ice_salt, salt, sizeof(ice_salt)); + set_req_buf = (struct pfk_ice_key_req *) qhandle->sbuf; set_req_buf->cmd_id = KEYMASTER_SET_ICE_KEY; set_req_buf->index = index; @@ -232,7 +292,7 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, goto out; } - if (pfk_wrapped_key_supported()) { + if (pfk_wrapped_key_supported() && should_use_keymaster()) { pr_debug("%s: Setting 
wrapped key\n", __func__); ret = set_wrapped_key(index, key, salt); } else { @@ -248,7 +308,7 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, goto out; } /* Try to invalidate the key to keep ICE in proper state */ - if (pfk_wrapped_key_supported()) + if (pfk_wrapped_key_supported() && should_use_keymaster()) ret1 = clear_wrapped_key(index); else ret1 = clear_key(index); @@ -285,7 +345,7 @@ int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type) return ret; } - if (pfk_wrapped_key_supported()) { + if (pfk_wrapped_key_supported() && should_use_keymaster()) { ret = clear_wrapped_key(index); pr_debug("%s: Clearing wrapped key\n", __func__); } else { diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h index 5adfcb200b6882a2914465e0be3325e4f059254a..897a6d0ab7667f7c4902deb26def8eac17217ec8 100644 --- a/security/pfe/pfk_ice.h +++ b/security/pfe/pfk_ice.h @@ -22,19 +22,31 @@ #include -struct __attribute__ ((__packed__)) pfk_ice_key_req { +struct pfk_ice_key_req { uint32_t cmd_id; uint32_t index; uint32_t ice_key_offset; uint32_t ice_key_size; uint32_t ice_salt_offset; uint32_t ice_salt_size; -}; +} __packed; -struct __attribute__ ((__packed__)) pfk_ice_key_rsp { +struct pfk_ice_key_rsp { uint32_t ret; uint32_t cmd_id; -}; +} __packed; + +struct pfk_km_get_version_req { + uint32_t cmd_id; +} __packed; + +struct pfk_km_get_version_rsp { + int status; + uint32_t major_version; + uint32_t minor_version; + uint32_t ta_major_version; + uint32_t ta_minor_version; +} __packed; int pfk_ice_init(void); int pfk_ice_deinit(void); diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 7f89d3c79a4b740b655342ebf4958d05fe6c6646..753d5fc4b284fa66cd0f3911d4096322fd7b3bf9 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -242,16 +242,12 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size, int err; while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) { - size_t aligned_size; if (err 
!= -ENOMEM) return err; if (size <= PAGE_SIZE) return -ENOMEM; - aligned_size = PAGE_SIZE << get_order(size); - if (size != aligned_size) - size = aligned_size; - else - size >>= 1; + size >>= 1; + size = PAGE_SIZE << get_order(size); } if (! dmab->area) return -ENOMEM; diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index 289ae6bb81d9d1f86f24b5bedc5a49feb8b00c0c..8ebbca554e995f47eff787d0ffd36fc244abf7d8 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c @@ -163,6 +163,7 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, int count, res; unsigned char buf[32], *pbuf; unsigned long flags; + bool check_resched = !in_atomic(); if (up) { vmidi->trigger = 1; @@ -200,6 +201,15 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, vmidi->event.type = SNDRV_SEQ_EVENT_NONE; } } + if (!check_resched) + continue; + /* do temporary unlock & cond_resched() for avoiding + * CPU soft lockup, which may happen via a write from + * a huge rawmidi buffer + */ + spin_unlock_irqrestore(&substream->runtime->lock, flags); + cond_resched(); + spin_lock_irqsave(&substream->runtime->lock, flags); } out: spin_unlock_irqrestore(&substream->runtime->lock, flags); diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h index f4fcdf93f3c8ed15c77283bca997d36276e2434d..d84620a0c26c4b95017e186930804f145656d981 100644 --- a/sound/pci/cs5535audio/cs5535audio.h +++ b/sound/pci/cs5535audio/cs5535audio.h @@ -67,9 +67,9 @@ struct cs5535audio_dma_ops { }; struct cs5535audio_dma_desc { - u32 addr; - u16 size; - u16 ctlreserved; + __le32 addr; + __le16 size; + __le16 ctlreserved; }; struct cs5535audio_dma { diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c index ee7065f6e162bb965a01ffac057360b33d13d3a7..326caec854e1089baedc9b91bd04c9134888478b 100644 --- a/sound/pci/cs5535audio/cs5535audio_pcm.c +++ 
b/sound/pci/cs5535audio/cs5535audio_pcm.c @@ -158,8 +158,8 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au, lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr); lastdesc->size = 0; lastdesc->ctlreserved = cpu_to_le16(PRD_JMP); - jmpprd_addr = cpu_to_le32(lastdesc->addr + - (sizeof(struct cs5535audio_dma_desc)*periods)); + jmpprd_addr = (u32)dma->desc_buf.addr + + sizeof(struct cs5535audio_dma_desc) * periods; dma->substream = substream; dma->period_bytes = period_bytes; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 62fbdbe74b938d6bd16560db23211dbbc6e08035..22c13ad6a9ae4afdf074f9194332dea1c4e4d432 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2205,7 +2205,7 @@ static int azx_probe(struct pci_dev *pci, */ static struct snd_pci_quirk power_save_blacklist[] = { /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ - SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), + SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 88ce2f1022e1a5d5d8d551e17ee6e44b87545b03..16197ad4512a40cf79849b9770231ee35170f214 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -211,6 +211,7 @@ static void cx_auto_reboot_notify(struct hda_codec *codec) struct conexant_spec *spec = codec->spec; switch (codec->core.vendor_id) { + case 0x14f12008: /* CX8200 */ case 0x14f150f2: /* CX20722 */ case 0x14f150f4: /* CX20724 */ break; @@ -218,13 +219,14 @@ static void cx_auto_reboot_notify(struct hda_codec *codec) return; } - /* Turn the CX20722 codec into D3 to avoid spurious noises + /* Turn the problematic codec into D3 to avoid spurious noises from the internal speaker during (and after) reboot */ cx_auto_turn_eapd(codec, 
spec->num_eapds, spec->eapds, false); snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); snd_hda_codec_write(codec, codec->core.afg, 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D3); + msleep(10); } static void cx_auto_free(struct hda_codec *codec) diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c index d4298af6d3eee6349fb2e3ca03c9c2f621aebae5..c0d0bf44f365b67e0c28321c352d146448c46339 100644 --- a/sound/pci/vx222/vx222_ops.c +++ b/sound/pci/vx222/vx222_ops.c @@ -275,7 +275,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ for (; length > 0; length--) { - outl(cpu_to_le32(*addr), port); + outl(*addr, port); addr++; } addr = (u32 *)runtime->dma_area; @@ -285,7 +285,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ for (; count > 0; count--) { - outl(cpu_to_le32(*addr), port); + outl(*addr, port); addr++; } @@ -313,7 +313,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ for (; length > 0; length--) - *addr++ = le32_to_cpu(inl(port)); + *addr++ = inl(port); addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; } @@ -321,7 +321,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. 
*/ for (; count > 0; count--) - *addr++ = le32_to_cpu(inl(port)); + *addr++ = inl(port); vx2_release_pseudo_dma(chip); } diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c index 8cde402263557d7ce6ea2c43edf317d61c6ca5c1..4c4ef1fec69fefa9b0b8ce499c3dca0efbd757d3 100644 --- a/sound/pcmcia/vx/vxp_ops.c +++ b/sound/pcmcia/vx/vxp_ops.c @@ -375,7 +375,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ for (; length > 0; length--) { - outw(cpu_to_le16(*addr), port); + outw(*addr, port); addr++; } addr = (unsigned short *)runtime->dma_area; @@ -385,7 +385,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ for (; count > 0; count--) { - outw(cpu_to_le16(*addr), port); + outw(*addr, port); addr++; } vx_release_pseudo_dma(chip); @@ -417,7 +417,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, length >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. */ for (; length > 0; length--) - *addr++ = le16_to_cpu(inw(port)); + *addr++ = inw(port); addr = (unsigned short *)runtime->dma_area; pipe->hw_ptr = 0; } @@ -425,12 +425,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, count >>= 1; /* in 16bit words */ /* Transfer using pseudo-dma. 
*/ for (; count > 1; count--) - *addr++ = le16_to_cpu(inw(port)); + *addr++ = inw(port); /* Disable DMA */ pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK; vx_outb(chip, DIALOG, pchip->regDIALOG); /* Read the last word (16 bits) */ - *addr = le16_to_cpu(inw(port)); + *addr = inw(port); /* Disable 16-bit accesses */ pchip->regDIALOG &= ~VXP_DLG_DMA16_SEL_MASK; vx_outb(chip, DIALOG, pchip->regDIALOG); diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 694db27b11fa578ec87ae9ca799876c84d715e59..13354d6304a848759433122e124d83119321bd71 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -238,7 +238,7 @@ static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE( static const struct soc_enum rx2_mix1_inp_enum[] = { SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text), SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text), - SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text), }; /* RX2 MIX2 */ @@ -249,7 +249,7 @@ static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE( static const struct soc_enum rx3_mix1_inp_enum[] = { SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text), SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text), - SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text), }; /* DEC */ diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c index 20755ecc7f9ea7e17d1fcdd5096f577929bb1957..a02dec251afedb525b53f43e36a9ef9d743efe31 100644 --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c @@ -116,23 +116,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime) struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card); struct snd_soc_jack 
*jack = &ctx->jack; - /** - * TI supports 4 butons headset detection - * KEY_MEDIA - * KEY_VOICECOMMAND - * KEY_VOLUMEUP - * KEY_VOLUMEDOWN - */ - if (ctx->ts3a227e_present) - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | - SND_JACK_BTN_0 | SND_JACK_BTN_1 | - SND_JACK_BTN_2 | SND_JACK_BTN_3; - else - jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE; + if (ctx->ts3a227e_present) { + /* + * The jack has already been created in the + * cht_max98090_headset_init() function. + */ + snd_soc_jack_notifier_register(jack, &cht_jack_nb); + return 0; + } + + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE; ret = snd_soc_card_jack_new(runtime->card, "Headset Jack", jack_type, jack, NULL, 0); - if (ret) { dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret); return ret; @@ -188,6 +184,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component) { struct snd_soc_card *card = component->card; struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); + struct snd_soc_jack *jack = &ctx->jack; + int jack_type; + int ret; + + /* + * TI supports 4 butons headset detection + * KEY_MEDIA + * KEY_VOICECOMMAND + * KEY_VOLUMEUP + * KEY_VOLUMEDOWN + */ + jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | + SND_JACK_BTN_0 | SND_JACK_BTN_1 | + SND_JACK_BTN_2 | SND_JACK_BTN_3; + + ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type, + jack, NULL, 0); + if (ret) { + dev_err(card->dev, "Headset Jack creation failed %d\n", ret); + return ret; + } return ts3a227e_enable_jack_detect(component, &ctx->jack); } diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 2684a2ba33cd366474ea8b056872ca469aeba72e..e28edb1f72635c9437cbed7072b6dcc9e8c31186 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -479,10 +479,10 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, } if (req_rate[0] % 48000 == 0) - adg->flags = AUDIO_OUT_48; + adg->flags |= AUDIO_OUT_48; if (of_get_property(np, 
"clkout-lr-asynchronous", NULL)) - adg->flags = LRCLK_ASYNC; + adg->flags |= LRCLK_ASYNC; /* * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 403e97d5e24322775dc01953ef32f8f4e3dd9276..8418462298e719bf756a9c0745afb24f8f5b21a1 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -219,6 +219,7 @@ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ +#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ @@ -338,6 +339,7 @@ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ @@ -370,5 +372,6 @@ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ +#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ #endif /* _ASM_X86_CPUFEATURES_H */