diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index cadb7a9a52184d3106ccaf4eb3118a4cf554f0b0..b41046b5713b9da188e1e0d9754d7229c6e06c5b 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -358,6 +358,8 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + /sys/devices/system/cpu/vulnerabilities/itlb_multihit Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/hw-vuln/index.rst b/Documentation/hw-vuln/index.rst index ffc064c1ec681c017ae5c30b267131c383144136..24f53c5013665fd9623e05ed5c4b6e6cf0dd3857 100644 --- a/Documentation/hw-vuln/index.rst +++ b/Documentation/hw-vuln/index.rst @@ -11,3 +11,5 @@ are configurable at compile, boot or run time. l1tf mds + tsx_async_abort + multihit.rst diff --git a/Documentation/hw-vuln/mds.rst b/Documentation/hw-vuln/mds.rst index daf6fdac49a3ebe8ffc7ea5af42a273a0e323fb2..fbbd1719afb927cf0e1c5dcac27e88347c48ec49 100644 --- a/Documentation/hw-vuln/mds.rst +++ b/Documentation/hw-vuln/mds.rst @@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are: ============ ============================================================= -Not specifying this option is equivalent to "mds=full". - +Not specifying this option is equivalent to "mds=full". For processors +that are affected by both TAA (TSX Asynchronous Abort) and MDS, +specifying just "mds=off" without an accompanying "tsx_async_abort=off" +will have no effect as the same mitigation is used for both +vulnerabilities. Mitigation selection guide -------------------------- diff --git a/Documentation/hw-vuln/multihit.rst b/Documentation/hw-vuln/multihit.rst new file mode 100644 index 0000000000000000000000000000000000000000..ba9988d8bce500af602492ee571b35d24b9678b3 --- /dev/null +++ b/Documentation/hw-vuln/multihit.rst @@ -0,0 +1,163 @@ +iTLB multihit +============= + +iTLB multihit is an erratum where some processors may incur a machine check +error, possibly resulting in an unrecoverable CPU lockup, when an +instruction fetch hits multiple entries in the instruction TLB. This can +occur when the page size is changed along with either the physical address +or cache type. A malicious guest running on a virtualized system can +exploit this erratum to perform a denial of service attack. + + +Affected processors +------------------- + +Variations of this erratum are present on most Intel Core and Xeon processor +models. The erratum is not present on: + + - non-Intel processors + + - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont) + + - Intel processors that have the PSCHANGE_MC_NO bit set in the + IA32_ARCH_CAPABILITIES MSR. + + +Related CVEs +------------ + +The following CVE entry is related to this issue: + + ============== ================================================= + CVE-2018-12207 Machine Check Error Avoidance on Page Size Change + ============== ================================================= + + +Problem +------- + +Privileged software, including OS and virtual machine managers (VMM), are in +charge of memory management. A key component in memory management is the control +of the page tables. 
Modern processors use virtual memory, a technique that creates the illusion of
+a very large memory for processors. This virtual space is split into pages of a
+given size. Page tables translate virtual addresses to physical addresses.
+
+To reduce latency when performing a virtual to physical address translation,
+processors include a structure, called TLB, that caches recent translations.
+There are separate TLBs for instruction (iTLB) and data (dTLB).
+
+Under this erratum, instructions are fetched from a linear address translated
+using a 4 KB translation cached in the iTLB. Privileged software modifies the
+paging structure so that the same linear address is mapped using a large page
+size (2 MB, 4 MB, 1 GB) with a different physical address or memory type.
+After the page structure modification but before the software invalidates any
+iTLB entries for the linear address, a code fetch that happens on the same
+linear address may cause a machine-check error which can result in a system
+hang or shutdown.
+
+
+Attack scenarios
+----------------
+
+Attacks against the iTLB multihit erratum can be mounted from malicious
+guests in a virtualized system.
+
+
+iTLB multihit system information
+--------------------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current iTLB
+multihit status of the system: whether the system is vulnerable and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+
+The possible values in this file are:
+
+.. list-table::
+
+   * - Not affected
+     - The processor is not vulnerable.
+   * - KVM: Mitigation: Split huge pages
+     - Software changes mitigate this issue.
+   * - KVM: Vulnerable
+     - The processor is vulnerable, but no mitigation is enabled.
+
+
+Enumeration of the erratum
+--------------------------
+
+A new bit (PSCHANGE_MC_NO) has been allocated in the IA32_ARCH_CAPABILITIES
+MSR and will be set on CPUs which are mitigated against this issue.
+
+   ======================================= =========== ================================
+   IA32_ARCH_CAPABILITIES MSR              Not present Possibly vulnerable, check model
+   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '0'         Likely vulnerable, check model
+   IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '1'         Not vulnerable
+   ======================================= =========== ================================
+
+
+Mitigation mechanism
+--------------------
+
+This erratum can be mitigated by restricting the use of large page sizes to
+non-executable pages. This forces all iTLB entries to be 4K, and removes
+the possibility of multiple hits.
+
+In order to mitigate the vulnerability, KVM initially marks all huge pages
+as non-executable. If the guest attempts to execute in one of those pages,
+the page is broken down into 4K pages, which are then marked executable.
+
+If EPT is disabled or not available on the host, KVM is in control of TLB
+flushes and the problematic situation cannot happen. However, the shadow
+EPT paging mechanism used by nested virtualization is vulnerable, because
+the nested guest can trigger multiple iTLB hits by modifying its own
+(non-nested) page tables. For simplicity, KVM will make large pages
+non-executable in all shadow paging modes.
+
+Mitigation control on the kernel command line and KVM - module parameter
+---------------------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism for marking huge pages as
+non-executable can be controlled with a module parameter "nx_huge_pages=".
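The mitigation status reported through the sysfs file described in the
"iTLB multihit system information" section above can also be read
programmatically. The following is a minimal userspace sketch for
illustration only (it is not part of this patch; error handling is kept to
a minimum)::

    /* Print the iTLB multihit status exported by the kernel via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/vulnerabilities/itlb_multihit";
            char status[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(status, sizeof(status), f))
                    printf("itlb_multihit: %s", status);
            fclose(f);
            return 0;
    }

On kernels without this patch the file is absent and the sketch simply
reports the open error.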
+The kernel command line allows to control the iTLB multihit mitigations at +boot time with the option "kvm.nx_huge_pages=". + +The valid arguments for these options are: + + ========== ================================================================ + force Mitigation is enabled. In this case, the mitigation implements + non-executable huge pages in Linux kernel KVM module. All huge + pages in the EPT are marked as non-executable. + If a guest attempts to execute in one of those pages, the page is + broken down into 4K pages, which are then marked executable. + + off Mitigation is disabled. + + auto Enable mitigation only if the platform is affected and the kernel + was not booted with the "mitigations=off" command line parameter. + This is the default option. + ========== ================================================================ + + +Mitigation selection guide +-------------------------- + +1. No virtualization in use +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The system is protected by the kernel unconditionally and no further + action is required. + +2. Virtualization with trusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If the guest comes from a trusted source, you may assume that the guest will + not attempt to maliciously exploit these errata and no further action is + required. + +3. Virtualization with untrusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + If the guest comes from an untrusted source, the guest host kernel will need + to apply iTLB multihit mitigation via the kernel command line or kvm + module parameter. diff --git a/Documentation/hw-vuln/tsx_async_abort.rst b/Documentation/hw-vuln/tsx_async_abort.rst new file mode 100644 index 0000000000000000000000000000000000000000..af6865b822d21d18946016c1458a4dc3567050e9 --- /dev/null +++ b/Documentation/hw-vuln/tsx_async_abort.rst @@ -0,0 +1,279 @@ +.. SPDX-License-Identifier: GPL-2.0 + +TAA - TSX Asynchronous Abort +====================================== + +TAA is a hardware vulnerability that allows unprivileged speculative access to +data which is available in various CPU internal buffers by using asynchronous +aborts within an Intel TSX transactional region. + +Affected processors +------------------- + +This vulnerability only affects Intel processors that support Intel +Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8) +is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit +(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations +also mitigate against TAA. + +Whether a processor is affected or not can be read out from the TAA +vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`. + +Related CVEs +------------ + +The following CVE entry is related to this TAA issue: + + ============== ===== =================================================== + CVE-2019-11135 TAA TSX Asynchronous Abort (TAA) condition on some + microprocessors utilizing speculative execution may + allow an authenticated user to potentially enable + information disclosure via a side channel with + local access. + ============== ===== =================================================== + +Problem +------- + +When performing store, load or L1 refill operations, processors write +data into temporary microarchitectural structures (buffers). The data in +those buffers can be forwarded to load operations as an optimization. 
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application by dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional
+region are rolled back. An asynchronous abort takes place, among other options,
+when a different thread accesses a cache line that is also used within the
+transactional region when that access might lead to a data race.
+
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to
+dependent operations. This can then be used to infer the value via a cache
+side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads,
+cross-Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As with MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+   * - 'Vulnerable'
+     - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+   * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+     - The system tries to clear the buffers but the microcode might not support the operation.
+   * - 'Mitigation: Clear CPU buffers'
+     - The microcode has been updated to clear the buffers. TSX is still enabled.
+   * - 'Mitigation: TSX disabled'
+     - TSX is disabled.
+   * - 'Not affected'
+     - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has the updated microcode, the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+ +The state in the tsx_async_abort sysfs file reflects this situation +accordingly. + + +Mitigation mechanism +-------------------- + +The kernel detects the affected CPUs and the presence of the microcode which is +required. If a CPU is affected and the microcode is available, then the kernel +enables the mitigation by default. + + +The mitigation can be controlled at boot time via a kernel command line option. +See :ref:`taa_mitigation_control_command_line`. + +.. _virt_mechanism: + +Virtualization mitigation +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Affected systems where the host has TAA microcode and TAA is mitigated by +having disabled TSX previously, are not vulnerable regardless of the status +of the VMs. + +In all other cases, if the host either does not have the TAA microcode or +the kernel is not mitigated, the system might be vulnerable. + + +.. _taa_mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +The kernel command line allows to control the TAA mitigations at boot time with +the option "tsx_async_abort=". The valid arguments for this option are: + + ============ ============================================================= + off This option disables the TAA mitigation on affected platforms. + If the system has TSX enabled (see next parameter) and the CPU + is affected, the system is vulnerable. + + full TAA mitigation is enabled. If TSX is enabled, on an affected + system it will clear CPU buffers on ring transitions. On + systems which are MDS-affected and deploy MDS mitigation, + TAA is also mitigated. Specifying this option on those + systems will have no effect. + + full,nosmt The same as tsx_async_abort=full, with SMT disabled on + vulnerable CPUs that have TSX enabled. This is the complete + mitigation. When TSX is disabled, SMT is not disabled because + CPU is not vulnerable to cross-thread TAA attacks. + ============ ============================================================= + +Not specifying this option is equivalent to "tsx_async_abort=full". For +processors that are affected by both TAA and MDS, specifying just +"tsx_async_abort=off" without an accompanying "mds=off" will have no +effect as the same mitigation is used for both vulnerabilities. + +The kernel command line also allows to control the TSX feature using the +parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used +to control the TSX feature and the enumeration of the TSX feature bits (RTM +and HLE) in CPUID. + +The valid options are: + + ============ ============================================================= + off Disables TSX on the system. + + Note that this option takes effect only on newer CPUs which are + not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 + and which get the new IA32_TSX_CTRL MSR through a microcode + update. This new MSR allows for the reliable deactivation of + the TSX functionality. + + on Enables TSX. + + Although there are mitigations for all known security + vulnerabilities, TSX has been known to be an accelerator for + several previous speculation-related CVEs, and so there may be + unknown security risks associated with leaving it enabled. + + auto Disables TSX if X86_BUG_TAA is present, otherwise enables TSX + on the system. + ============ ============================================================= + +Not specifying this option is equivalent to "tsx=off". + +The following combinations of the "tsx_async_abort" and "tsx" are possible. 
For
+affected platforms tsx=auto is equivalent to tsx=off and the result will be:
+
+  =========  ==========================  =========================================
+  tsx=on     tsx_async_abort=full        The system will use VERW to clear CPU
+                                         buffers. Cross-thread attacks are still
+                                         possible on SMT machines.
+  tsx=on     tsx_async_abort=full,nosmt  As above, cross-thread attacks on SMT
+                                         are mitigated.
+  tsx=on     tsx_async_abort=off         The system is vulnerable.
+  tsx=off    tsx_async_abort=full        TSX might be disabled if microcode
+                                         provides a TSX control MSR. If so,
+                                         the system is not vulnerable.
+  tsx=off    tsx_async_abort=full,nosmt  Ditto
+  tsx=off    tsx_async_abort=off         Ditto
+  =========  ==========================  =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" do not clear CPU
+buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+the "tsx" command line argument has no effect.
+
+For affected platforms, the table below indicates the mitigation status for the
+combinations of CPUID bit MD_CLEAR and IA32_ARCH_CAPABILITIES MSR bits MDS_NO
+and TSX_CTRL_MSR.
+
+  =======  =========  =============  ========================================
+  MDS_NO   MD_CLEAR   TSX_CTRL_MSR   Status
+  =======  =========  =============  ========================================
+    0         0            0         Vulnerable (needs microcode)
+    0         1            0         MDS and TAA mitigated via VERW
+    1         1            0         MDS fixed, TAA vulnerable if TSX enabled
+                                     because MD_CLEAR has no meaning and
+                                     VERW is not guaranteed to clear buffers
+    1         X            1         MDS fixed, TAA can be mitigated by
+                                     VERW or TSX_CTRL_MSR
+  =======  =========  =============  ========================================
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+  - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 194ec2d0140941b35a9742bfea1a067b6fcdf890..4d2938c1c6c63f7948b37eda72c03134ed5c9989 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1997,6 +1997,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			KVM MMU at runtime.
 			Default is 0 (off)
 
+	kvm.nx_huge_pages=
+			[KVM] Controls the software workaround for the
+			X86_BUG_ITLB_MULTIHIT bug.
+			force	: Always deploy workaround.
+			off	: Never deploy workaround.
+			auto	: Deploy workaround based on the presence of
+				  X86_BUG_ITLB_MULTIHIT.
+
+			Default is 'auto'.
+
+			If the software workaround is enabled for the host,
+			guests do not need to enable it for nested guests.
+ + kvm.nx_huge_pages_recovery_ratio= + [KVM] Controls how many 4KiB pages are periodically zapped + back to huge pages. 0 disables the recovery, otherwise if + the value is N KVM will zap 1/Nth of the 4KiB pages every + minute. The default is 60. + kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM. Default is 1 (enabled) @@ -2368,6 +2387,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. SMT on vulnerable CPUs off - Unconditionally disable MDS mitigation + On TAA-affected machines, mds=off can be prevented by + an active TAA mitigation as both vulnerabilities are + mitigated with the same mechanism so in order to disable + this mitigation, you need to specify tsx_async_abort=off + too. + Not specifying this option is equivalent to mds=full. @@ -2512,6 +2537,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. spec_store_bypass_disable=off [X86] l1tf=off [X86] mds=off [X86] + tsx_async_abort=off [X86] + kvm.nx_huge_pages=off [X86] + + Exceptions: + This does not have any effect on + kvm.nx_huge_pages when + kvm.nx_huge_pages=force. auto (default) Mitigate all CPU vulnerabilities, but leave SMT @@ -2527,6 +2559,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. be fully mitigated, even if it means losing SMT. Equivalent to: l1tf=flush,nosmt [X86] mds=full,nosmt [X86] + tsx_async_abort=full,nosmt [X86] mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this @@ -4550,6 +4583,76 @@ bytes respectively. Such letter suffixes can also be entirely omitted. platforms where RDTSC is slow and this accounting can add overhead. + tsx= [X86] Control Transactional Synchronization + Extensions (TSX) feature in Intel processors that + support TSX control. + + This parameter controls the TSX feature. The options are: + + on - Enable TSX on the system. Although there are + mitigations for all known security vulnerabilities, + TSX has been known to be an accelerator for + several previous speculation-related CVEs, and + so there may be unknown security risks associated + with leaving it enabled. + + off - Disable TSX on the system. (Note that this + option takes effect only on newer CPUs which are + not vulnerable to MDS, i.e., have + MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get + the new IA32_TSX_CTRL MSR through a microcode + update. This new MSR allows for the reliable + deactivation of the TSX functionality.) + + auto - Disable TSX if X86_BUG_TAA is present, + otherwise enable TSX on the system. + + Not specifying this option is equivalent to tsx=off. + + See Documentation/hw-vuln/tsx_async_abort.rst + for more details. + + tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async + Abort (TAA) vulnerability. + + Similar to Micro-architectural Data Sampling (MDS) + certain CPUs that support Transactional + Synchronization Extensions (TSX) are vulnerable to an + exploit against CPU internal buffers which can forward + information to a disclosure gadget under certain + conditions. + + In vulnerable processors, the speculatively forwarded + data can be used in a cache side channel attack, to + access data to which the attacker does not have direct + access. + + This parameter controls the TAA mitigation. The + options are: + + full - Enable TAA mitigation on vulnerable CPUs + if TSX is enabled. + + full,nosmt - Enable TAA mitigation and disable SMT on + vulnerable CPUs. If TSX is disabled, SMT + is not disabled because CPU is not + vulnerable to cross-thread TAA attacks. 
+ off - Unconditionally disable TAA mitigation + + On MDS-affected machines, tsx_async_abort=off can be + prevented by an active MDS mitigation as both vulnerabilities + are mitigated with the same mechanism so in order to disable + this mitigation, you need to specify mds=off too. + + Not specifying this option is equivalent to + tsx_async_abort=full. On CPUs which are MDS affected + and deploy MDS mitigation, TAA mitigation is not + required and doesn't provide any additional + mitigation. + + For details see: + Documentation/hw-vuln/tsx_async_abort.rst + turbografx.map[2|3]= [HW,JOY] TurboGraFX parallel port interface Format: diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt deleted file mode 100644 index aee715af7db741be230d754dc468bbb9709e81d6..0000000000000000000000000000000000000000 --- a/Documentation/usb/rio.txt +++ /dev/null @@ -1,138 +0,0 @@ -Copyright (C) 1999, 2000 Bruce Tenison -Portions Copyright (C) 1999, 2000 David Nelson -Thanks to David Nelson for guidance and the usage of the scanner.txt -and scanner.c files to model our driver and this informative file. - -Mar. 2, 2000 - -CHANGES - -- Initial Revision - - -OVERVIEW - -This README will address issues regarding how to configure the kernel -to access a RIO 500 mp3 player. -Before I explain how to use this to access the Rio500 please be warned: - -W A R N I N G: --------------- - -Please note that this software is still under development. The authors -are in no way responsible for any damage that may occur, no matter how -inconsequential. - -It seems that the Rio has a problem when sending .mp3 with low batteries. -I suggest when the batteries are low and you want to transfer stuff that you -replace it with a fresh one. In my case, what happened is I lost two 16kb -blocks (they are no longer usable to store information to it). But I don't -know if that's normal or not; it could simply be a problem with the flash -memory. - -In an extreme case, I left my Rio playing overnight and the batteries wore -down to nothing and appear to have corrupted the flash memory. My RIO -needed to be replaced as a result. Diamond tech support is aware of the -problem. Do NOT allow your batteries to wear down to nothing before -changing them. It appears RIO 500 firmware does not handle low battery -power well at all. - -On systems with OHCI controllers, the kernel OHCI code appears to have -power on problems with some chipsets. If you are having problems -connecting to your RIO 500, try turning it on first and then plugging it -into the USB cable. - -Contact information: --------------------- - - The main page for the project is hosted at sourceforge.net in the following - URL: . You can also go to the project's - sourceforge home page at: . - There is also a mailing list: rio500-users@lists.sourceforge.net - -Authors: -------- - -Most of the code was written by Cesar Miquel . Keith -Clayton is incharge of the PPC port and making sure -things work there. Bruce Tenison is adding support -for .fon files and also does testing. The program will mostly sure be -re-written and Pete Ikusz along with the rest will re-design it. I would -also like to thank Tri Nguyen who provided use -with some important information regarding the communication with the Rio. - -ADDITIONAL INFORMATION and Userspace tools - -http://rio500.sourceforge.net/ - - -REQUIREMENTS - -A host with a USB port. Ideally, either a UHCI (Intel) or OHCI -(Compaq and others) hardware port should work. 
- -A Linux development kernel (2.3.x) with USB support enabled or a -backported version to linux-2.2.x. See http://www.linux-usb.org for -more information on accomplishing this. - -A Linux kernel with RIO 500 support enabled. - -'lspci' which is only needed to determine the type of USB hardware -available in your machine. - -CONFIGURATION - -Using `lspci -v`, determine the type of USB hardware available. - - If you see something like: - - USB Controller: ...... - Flags: ..... - I/O ports at .... - - Then you have a UHCI based controller. - - If you see something like: - - USB Controller: ..... - Flags: .... - Memory at ..... - - Then you have a OHCI based controller. - -Using `make menuconfig` or your preferred method for configuring the -kernel, select 'Support for USB', 'OHCI/UHCI' depending on your -hardware (determined from the steps above), 'USB Diamond Rio500 support', and -'Preliminary USB device filesystem'. Compile and install the modules -(you may need to execute `depmod -a` to update the module -dependencies). - -Add a device for the USB rio500: - `mknod /dev/usb/rio500 c 180 64` - -Set appropriate permissions for /dev/usb/rio500 (don't forget about -group and world permissions). Both read and write permissions are -required for proper operation. - -Load the appropriate modules (if compiled as modules): - - OHCI: - modprobe usbcore - modprobe usb-ohci - modprobe rio500 - - UHCI: - modprobe usbcore - modprobe usb-uhci (or uhci) - modprobe rio500 - -That's it. The Rio500 Utils at: http://rio500.sourceforge.net should -be able to access the rio500. - -BUGS - -If you encounter any problems feel free to drop me an email. - -Bruce Tenison -btenison@dibbs.net - diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt index e5dd9f4d61008ad6431e067b900608788e573020..46ef3680c8abc62d2afd6eeb50b7dc71b309bb33 100644 --- a/Documentation/virtual/kvm/locking.txt +++ b/Documentation/virtual/kvm/locking.txt @@ -13,8 +13,8 @@ The acquisition orders for mutexes are as follows: - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring them together is quite rare. -For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything -else is a leaf: no other lock is taken inside the critical sections. +Everything else is a leaf: no other lock is taken inside the critical +sections. 2: Exception ------------ @@ -142,7 +142,7 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update(). ------------ Name: kvm_lock -Type: spinlock_t +Type: mutex Arch: any Protects: - vm_list diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst index ef389dcf1b1d3cc6a01d334fff858c21c696c931..0780d55c5aa82835600973f16e51f3195d374259 100644 --- a/Documentation/x86/index.rst +++ b/Documentation/x86/index.rst @@ -6,3 +6,4 @@ x86 architecture specifics :maxdepth: 1 mds + tsx_async_abort diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst new file mode 100644 index 0000000000000000000000000000000000000000..4a4336a893722aea6f28f634c4e0f07dd594e615 --- /dev/null +++ b/Documentation/x86/tsx_async_abort.rst @@ -0,0 +1,117 @@ +.. SPDX-License-Identifier: GPL-2.0 + +TSX Async Abort (TAA) mitigation +================================ + +.. _tsx_async_abort: + +Overview +-------- + +TSX Async Abort (TAA) is a side channel attack on internal buffers in some +Intel processors similar to Microachitectural Data Sampling (MDS). 
In this +case certain loads may speculatively pass invalid data to dependent operations +when an asynchronous abort condition is pending in a Transactional +Synchronization Extensions (TSX) transaction. This includes loads with no +fault or assist condition. Such loads may speculatively expose stale data from +the same uarch data structures as in MDS, with same scope of exposure i.e. +same-thread and cross-thread. This issue affects all current processors that +support TSX. + +Mitigation strategy +------------------- + +a) TSX disable - one of the mitigations is to disable TSX. A new MSR +IA32_TSX_CTRL will be available in future and current processors after +microcode update which can be used to disable TSX. In addition, it +controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID. + +b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this +vulnerability. More details on this approach can be found in +:ref:`Documentation/hw-vuln/mds.rst `. + +Kernel internal mitigation modes +-------------------------------- + + ============= ============================================================ + off Mitigation is disabled. Either the CPU is not affected or + tsx_async_abort=off is supplied on the kernel command line. + + tsx disabled Mitigation is enabled. TSX feature is disabled by default at + bootup on processors that support TSX control. + + verw Mitigation is enabled. CPU is affected and MD_CLEAR is + advertised in CPUID. + + ucode needed Mitigation is enabled. CPU is affected and MD_CLEAR is not + advertised in CPUID. That is mainly for virtualization + scenarios where the host has the updated microcode but the + hypervisor does not expose MD_CLEAR in CPUID. It's a best + effort approach without guarantee. + ============= ============================================================ + +If the CPU is affected and the "tsx_async_abort" kernel command line parameter is +not provided then the kernel selects an appropriate mitigation depending on the +status of RTM and MD_CLEAR CPUID bits. + +Below tables indicate the impact of tsx=on|off|auto cmdline options on state of +TAA mitigation, VERW behavior and TSX feature for various combinations of +MSR_IA32_ARCH_CAPABILITIES bits. + +1. "tsx=off" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=off +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Disabled Yes TSX disabled TSX disabled + 1 X 1 Disabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +2. 
"tsx=on" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=on +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Enabled Yes None Same as MDS + 1 X 1 Enabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +3. "tsx=auto" + +========= ========= ============ ============ ============== =================== ====================== +MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=auto +---------------------------------- ------------------------------------------------------------------------- +TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation + after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full +========= ========= ============ ============ ============== =================== ====================== + 0 0 0 HW default Yes Same as MDS Same as MDS + 0 0 1 Invalid case Invalid case Invalid case Invalid case + 0 1 0 HW default No Need ucode update Need ucode update + 0 1 1 Disabled Yes TSX disabled TSX disabled + 1 X 1 Enabled X None needed None needed +========= ========= ============ ============ ============== =================== ====================== + +In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that +indicates whether MSR_IA32_TSX_CTRL is supported. + +There are two control bits in IA32_TSX_CTRL MSR: + + Bit 0: When set it disables the Restricted Transactional Memory (RTM) + sub-feature of TSX (will force all transactions to abort on the + XBEGIN instruction). + + Bit 1: When set it disables the enumeration of the RTM and HLE feature + (i.e. it will make CPUID(EAX=7).EBX{bit4} and + CPUID(EAX=7).EBX{bit11} read as 0). 
diff --git a/MAINTAINERS b/MAINTAINERS index 5ea427b21f73b88039148ef34d623d627e39e4df..01e7290ef9058682998a2799d280ab4d26a899c4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12486,13 +12486,6 @@ W: http://www.linux-usb.org/usbnet S: Maintained F: drivers/net/usb/dm9601.c -USB DIAMOND RIO500 DRIVER -M: Cesar Miquel -L: rio500-users@lists.sourceforge.net -W: http://rio500.sourceforge.net -S: Maintained -F: drivers/usb/misc/rio500* - USB EHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org diff --git a/Makefile b/Makefile index eb47757c27f5532b797407b41bef2022a2b8e4a5..3d2af23a0c0944c67554346dd696d4918fe73f43 100755 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 193 +SUBLEVEL = 205 EXTRAVERSION = NAME = Roaring Lionus @@ -940,6 +940,18 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) # enforce correct pointer usage KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) +# Require designated initializers for all marked structures +KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) + +# change __FILE__ to the relative path from the srctree +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + +# ensure -fcf-protection is disabled when using retpoline as it is +# incompatible with -mindirect-branch=thunk-extern +ifdef CONFIG_RETPOLINE +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +endif + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 2ce24e74f87956af0bba1d9430200196353bdff8..a509b77ef80d16c41a2380bba37c2bbe91106d7b 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -488,8 +488,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev) /* loop thru all available h/w condition indexes */ for (j = 0; j < cc_bcr.c; j++) { write_aux_reg(ARC_REG_CC_INDEX, j); - cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); - cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); + cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0)); + cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1)); /* See if it has been mapped to a perf event_id */ for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index 2fb0cd39a31c81e2b4b01471a548dbefd8793662..cd6e3615e3d1d0c45c8f88b299e8e81a56fe9f46 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -163,3 +163,4 @@ void abort(void) { __asm__ __volatile__("trap_s 5\n"); } +EXPORT_SYMBOL(abort); diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h index 17ae0f3efac8e71d45043252e32298772ee43237..005bf4ff1b4cb9a9e31558fb146291252039b829 100644 --- a/arch/arm/boot/compressed/libfdt_env.h +++ b/arch/arm/boot/compressed/libfdt_env.h @@ -5,6 +5,8 @@ #include #include +#define INT_MAX ((int)(~0U>>1)) + typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index e82432c79f85f3a0a1d089cb3ba7a633e96bf9df..3f3ad09c7cd5fc3064f7373c8de59d7187887359 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -701,6 +701,7 @@ pinctrl-0 = <&cpsw_default>; pinctrl-1 = <&cpsw_sleep>; status = "okay"; + slaves = <1>; }; &davinci_mdio { @@ -708,15 +709,14 @@ pinctrl-0 = <&davinci_mdio_default>; pinctrl-1 = <&davinci_mdio_sleep>; status = "okay"; -}; -&cpsw_emac0 { - phy_id = 
<&davinci_mdio>, <0>; - phy-mode = "rgmii-txid"; + ethphy0: ethernet-phy@0 { + reg = <0>; + }; }; -&cpsw_emac1 { - phy_id = <&davinci_mdio>, <1>; +&cpsw_emac0 { + phy-handle = <ðphy0>; phy-mode = "rgmii-txid"; }; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index c9c9a47446e8e665e2821b1a397da051639da406..56224aa5e83eeb6286ba812d3633328531d55f8d 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -1117,6 +1117,8 @@ ti,hwmods = "dss_dispc"; clocks = <&disp_clk>; clock-names = "fck"; + + max-memory-bandwidth = <230000000>; }; rfbi: rfbi@4832a800 { diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi index e2e9599596e25be3600ff40a6b1b74d62e3bcdb0..05379b6c1c13b88fcb426964f9eceed97aa8c6f6 100644 --- a/arch/arm/boot/dts/arm-realview-eb.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb.dtsi @@ -334,7 +334,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp: ssp@1000d000 { + ssp: spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; clocks = <&sspclk>, <&pclk>; diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index c789564f2803334e891911a6a75b171fc28d5165..c1fd5615ddfe3ab8c1ef01bd7e8ec19c4d043928 100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -343,7 +343,7 @@ clock-names = "apb_pclk"; }; - pb1176_ssp: ssp@1010b000 { + pb1176_ssp: spi@1010b000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1010b000 0x1000>; interrupt-parent = <&intc_dc1176>; diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts index 3944765ac4b067b0d12c9edf3e4ff34bcf720d62..e306f1cceb4ecced70fc2879fb2e23062b60c5a5 100644 --- a/arch/arm/boot/dts/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts @@ -480,7 +480,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp@1000d000 { + spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; interrupt-parent = <&intc_pb11mp>; diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi index aeb49c4bd773f40fea508ea2e2ab342270664030..2bf3958b2e6b9fe605710947d1ad1c81902a4d51 100644 --- a/arch/arm/boot/dts/arm-realview-pbx.dtsi +++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi @@ -318,7 +318,7 @@ clock-names = "uartclk", "apb_pclk"; }; - ssp: ssp@1000d000 { + ssp: spi@1000d000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x1000d000 0x1000>; clocks = <&sspclk>, <&pclk>; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index b3501ae2a3bd19211769bdd00ee18d4245562bdf..4fba898b8f4f9342e65535b102c4ce4f281f95d4 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -546,7 +546,7 @@ }; }; - uart1 { + usart1 { pinctrl_usart1: usart1-0 { atmel,pins = ; - si5351: clock-generator { + si5351: clock-generator@60 { compatible = "silabs,si5351a-msop"; reg = <0x60>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 698d58cea20d2e8c1a3c7d8e7a4dadb717847624..11342aeccb73a345aebebae985b859d0809830d2 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -152,7 +152,7 @@ 0xffffe000 MBUS_ID(0x03, 0x01) 0 0x0000800 /* CESA SRAM 2k */ 0xfffff000 MBUS_ID(0x0d, 0x00) 0 0x0000800>; /* PMU SRAM 2k */ - spi0: spi-ctrl@10600 { + spi0: spi@10600 { compatible = "marvell,orion-spi"; #address-cells = <1>; #size-cells = <0>; 
@@ -165,7 +165,7 @@ status = "disabled"; }; - i2c: i2c-ctrl@11000 { + i2c: i2c@11000 { compatible = "marvell,mv64xxx-i2c"; reg = <0x11000 0x20>; #address-cells = <1>; @@ -215,7 +215,7 @@ status = "disabled"; }; - spi1: spi-ctrl@14600 { + spi1: spi@14600 { compatible = "marvell,orion-spi"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 6098dacd09f11c76954830435498882fe4bda50e..1b2709af2a42bbb0fafd3996982d4590ea256573 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -170,6 +170,8 @@ reg = <0x66>; interrupt-parent = <&gpx3>; interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&s5m8767_irq>; vinb1-supply = <&main_dc_reg>; vinb2-supply = <&main_dc_reg>; @@ -547,6 +549,13 @@ cap-sd-highspeed; }; +&pinctrl_0 { + s5m8767_irq: s5m8767-irq { + samsung,pins = "gpx3-2"; + samsung,pin-pud = ; + }; +}; + &rtc { status = "okay"; }; diff --git a/arch/arm/boot/dts/exynos5250-snow-rev5.dts b/arch/arm/boot/dts/exynos5250-snow-rev5.dts index 90560c316f6444c45757ee7edf0ff4bb21b3f246..cb986175b69b42dd4a8eb0669259089d88791a23 100644 --- a/arch/arm/boot/dts/exynos5250-snow-rev5.dts +++ b/arch/arm/boot/dts/exynos5250-snow-rev5.dts @@ -23,6 +23,14 @@ samsung,model = "Snow-I2S-MAX98090"; samsung,audio-codec = <&max98090>; + + cpu { + sound-dai = <&i2s0 0>; + }; + + codec { + sound-dai = <&max98090 0>, <&hdmi>; + }; }; }; @@ -34,6 +42,9 @@ interrupt-parent = <&gpx0>; pinctrl-names = "default"; pinctrl-0 = <&max98090_irq>; + clocks = <&pmu_system_controller 0>; + clock-names = "mclk"; + #sound-dai-cells = <1>; }; }; diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index ec4a00f1ce01e7a607844dcfb38a951e4d49eec4..c9d379b1a16690a0d3e5e71967ad3af3a88cece2 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -302,6 +302,7 @@ regulator-name = "vdd_1v35"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <1350000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -323,6 +324,7 @@ regulator-name = "vdd_2v"; regulator-min-microvolt = <2000000>; regulator-max-microvolt = <2000000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -333,6 +335,7 @@ regulator-name = "vdd_1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -427,6 +430,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 01f466816fea714f1056c041965205dead9db615..ae58b8d6f6144389043e3c64ccad729b318b4b2e 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -302,6 +302,7 @@ regulator-name = "vdd_1v35"; regulator-min-microvolt = <1350000>; regulator-max-microvolt = <1350000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -323,6 +324,7 @@ regulator-name = "vdd_2v"; regulator-min-microvolt = <2000000>; regulator-max-microvolt = <2000000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -333,6 +335,7 @@ 
regulator-name = "vdd_1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-boot-on; regulator-state-mem { regulator-on-in-suspend; @@ -427,6 +430,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts index 2051306008534af0f60136db87861aed34047362..72d1b8209f5e660c89f97c8d108e772650bca199 100644 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts @@ -43,7 +43,7 @@ <&clks IMX7D_ENET1_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy0>; fsl,magic-packet; status = "okay"; @@ -69,7 +69,7 @@ <&clks IMX7D_ENET2_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy1>; fsl,magic-packet; status = "okay"; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index edc5ddeb851a7e09ad65b216072cc4324a8969d8..0a7ea1a765f999ee220585ca1fc74d6b5163f540 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -437,7 +437,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302d0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT1_ROOT_CLK>, <&clks IMX7D_GPT1_ROOT_CLK>; clock-names = "ipg", "per"; }; @@ -446,7 +446,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302e0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT2_ROOT_CLK>, <&clks IMX7D_GPT2_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; @@ -456,7 +456,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x302f0000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT3_ROOT_CLK>, <&clks IMX7D_GPT3_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; @@ -466,7 +466,7 @@ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt"; reg = <0x30300000 0x10000>; interrupts = ; - clocks = <&clks IMX7D_CLK_DUMMY>, + clocks = <&clks IMX7D_GPT4_ROOT_CLK>, <&clks IMX7D_GPT4_ROOT_CLK>; clock-names = "ipg", "per"; status = "disabled"; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index ceb49d15d243c0204c4b243881cce14434170acd..20ee7ca8c6534142e70a06186ccbc36d2f420301 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -266,3 +266,7 @@ &twl_gpio { ti,use-leds; }; + +&twl_keypad { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi index 0d20aadc78bb1e56b87014ff384461b2ac0c8cc9..5fa3111731cb0530e9bbbce6b9ed9be81daecf2d 100644 --- a/arch/arm/boot/dts/lpc32xx.dtsi +++ b/arch/arm/boot/dts/lpc32xx.dtsi @@ -179,7 +179,7 @@ * ssp0 and spi1 are shared pins; * enable one in your board dts, as needed. */ - ssp0: ssp@20084000 { + ssp0: spi@20084000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x20084000 0x1000>; interrupts = <20 IRQ_TYPE_LEVEL_HIGH>; @@ -199,7 +199,7 @@ * ssp1 and spi2 are shared pins; * enable one in your board dts, as needed. 
*/ - ssp1: ssp@2008c000 { + ssp1: spi@2008c000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x2008c000 0x1000>; interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index b3a8b1f24499afe232d752ba16e15e45e4280d5b..7191506934494663710fb703ec9455dcd83b2517 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -28,6 +28,7 @@ aliases { display0 = &lcd; + display1 = &tv0; }; gpio-keys { @@ -70,7 +71,7 @@ #sound-dai-cells = <0>; }; - spi_lcd { + spi_lcd: spi_lcd { compatible = "spi-gpio"; #address-cells = <0x1>; #size-cells = <0x0>; @@ -122,7 +123,7 @@ }; tv0: connector { - compatible = "svideo-connector"; + compatible = "composite-video-connector"; label = "tv"; port { @@ -134,7 +135,7 @@ tv_amp: opa362 { compatible = "ti,opa362"; - enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; + enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; /* GPIO_23 to enable video out amplifier */ ports { #address-cells = <1>; @@ -273,6 +274,13 @@ OMAP3_CORE1_IOPAD(0x2134, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio112 */ >; }; + + penirq_pins: pinmux_penirq_pins { + pinctrl-single,pins = < + /* here we could enable to wakeup the cpu from suspend by a pen touch */ + OMAP3_CORE1_IOPAD(0x2194, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio160 */ + >; + }; }; &omap3_pmx_core2 { @@ -410,10 +418,19 @@ tsc2007@48 { compatible = "ti,tsc2007"; reg = <0x48>; + pinctrl-names = "default"; + pinctrl-0 = <&penirq_pins>; interrupt-parent = <&gpio6>; interrupts = <0 IRQ_TYPE_EDGE_FALLING>; /* GPIO_160 */ - gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* GPIO_160 */ ti,x-plate-ohms = <600>; + touchscreen-size-x = <480>; + touchscreen-size-y = <640>; + touchscreen-max-pressure = <1000>; + touchscreen-fuzz-x = <3>; + touchscreen-fuzz-y = <8>; + touchscreen-fuzz-pressure = <10>; + touchscreen-inverted-y; }; /* RFID EEPROM */ @@ -519,6 +536,12 @@ regulator-max-microvolt = <3150000>; }; +/* Needed to power the DPI pins */ + +&vpll2 { + regulator-always-on; +}; + &dss { pinctrl-names = "default"; pinctrl-0 = < &dss_dpi_pins >; @@ -539,10 +562,14 @@ vdda-supply = <&vdac>; + #address-cells = <1>; + #size-cells = <0>; + port { + reg = <0>; venc_out: endpoint { remote-endpoint = <&opa_in>; - ti,channels = <2>; + ti,channels = <1>; ti,invert-polarity; }; }; @@ -586,22 +613,22 @@ bootloaders@80000 { label = "U-Boot"; - reg = <0x80000 0x1e0000>; + reg = <0x80000 0x1c0000>; }; - bootloaders_env@260000 { + bootloaders_env@240000 { label = "U-Boot Env"; - reg = <0x260000 0x20000>; + reg = <0x240000 0x40000>; }; kernel@280000 { label = "Kernel"; - reg = <0x280000 0x400000>; + reg = <0x280000 0x600000>; }; - filesystem@680000 { + filesystem@880000 { label = "File System"; - reg = <0x680000 0xf980000>; + reg = <0x880000 0>; /* 0 = MTDPART_SIZ_FULL */ }; }; }; diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 4caadb25324977e67c40d4ebde2929cd6782cbf8..e412373fe7bfdf328f10a48cdc84d6373ce51977 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -694,6 +694,11 @@ vbus-supply = <&smps10_out1_reg>; }; +&dwc3 { + extcon = <&extcon_usb3>; + dr_mode = "otg"; +}; + &mcspi1 { }; diff --git a/arch/arm/boot/dts/orion5x-linkstation.dtsi b/arch/arm/boot/dts/orion5x-linkstation.dtsi index ed456ab35fd84ae4eddd49dd9c2542bbfc5fcc0e..c1bc8376d4eb0b2d754e09ef64f762e175ecf346 100644 --- a/arch/arm/boot/dts/orion5x-linkstation.dtsi +++ 
b/arch/arm/boot/dts/orion5x-linkstation.dtsi @@ -156,7 +156,7 @@ &i2c { status = "okay"; - rtc { + rtc@32 { compatible = "ricoh,rs5c372a"; reg = <0x32>; }; diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi index 9e73dc6b3ed3ef269474c4fb05867f0b18166752..0e1320afa15620df2eb2781da0c0fae613788b70 100644 --- a/arch/arm/boot/dts/pxa27x.dtsi +++ b/arch/arm/boot/dts/pxa27x.dtsi @@ -70,7 +70,7 @@ clocks = <&clks CLK_PWM1>; }; - pwri2c: i2c@40f000180 { + pwri2c: i2c@40f00180 { compatible = "mrvl,pxa-i2c"; reg = <0x40f00180 0x24>; interrupts = <6>; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 4b7d97275c621af5a219856d38f489a3e704b2e7..5ee84e3cb3e97e04b682d393ba2e51a914b9d25c 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -211,7 +211,7 @@ saw0: regulator@b089000 { compatible = "qcom,saw2"; - reg = <0x02089000 0x1000>, <0x0b009000 0x1000>; + reg = <0x0b089000 0x1000>, <0x0b009000 0x1000>; regulator; }; diff --git a/arch/arm/boot/dts/qcom/sa415m-cdp.dts b/arch/arm/boot/dts/qcom/sa415m-cdp.dts index b021bf0d5235574f630f5d285b4f4e3036f3dfa7..b9e7d4947c6e906a3541f7907ce29d6e0cf2afa1 100644 --- a/arch/arm/boot/dts/qcom/sa415m-cdp.dts +++ b/arch/arm/boot/dts/qcom/sa415m-cdp.dts @@ -12,7 +12,7 @@ /dts-v1/; -#include "sa415m-cdp.dtsi" +#include "sdxpoorwills-cdp.dtsi" #include "sdxpoorwills-display.dtsi" #include "qpic-panel-ili-hvga.dtsi" diff --git a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi b/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi deleted file mode 100644 index b7996e57e1b86fc754ccd88f7fa5dbbc49eccf86..0000000000000000000000000000000000000000 --- a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "sdxpoorwills-cdp.dtsi" - -&vbus_detect { - status = "okay"; -}; - -&qcom_seecom { - status = "okay"; -}; - -&usb { - status = "okay"; - qcom,connector-type-uAB; - extcon = <0>, <0>, <0>, <&vbus_detect>; - dwc3@a600000 { - normal-eps-in-gsi-mode; - }; -}; - -&smb138x { - status = "disabled"; -}; - -&thermal_zones { - mdm-core-step { - trips { - mdm-step-trip-0 { - temperature = <105000>; - }; - - mdm-step-trip-1 { - temperature = <115000>; - }; - }; - }; -}; diff --git a/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts b/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts index 5542ab42397a64800a4f41cc75fb3b36b06d21c2..c8b78915dac0cfbda769ca5fa212165e366dfeec 100644 --- a/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts +++ b/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts @@ -12,7 +12,7 @@ /dts-v1/; -#include "sa415m-cdp.dtsi" +#include "sdxpoorwills-cdp.dtsi" #include "sdxpoorwills-v2.dtsi" #include "sdxpoorwills-display.dtsi" #include "qpic-panel-ili-hvga.dtsi" diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index a935523a1eb85a0314788f8f707c4d95a7a2d49f..147c73f68f1d9ffb3347a311f6a3a8f57d6fa534 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -744,7 +744,7 @@ /* no rts / cts for uart2 */ }; - spi { + spi-pins { spi_txd:spi-txd { rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>; }; diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts index afea3645ada43500eb0f2903b904760b169eddde..89d55894d91622707b3acbaa993f53be66e35090 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts @@ -88,7 +88,7 @@ status = "okay"; speed-mode = <0>; - adxl345: adxl345@0 { + adxl345: adxl345@53 { compatible = "adi,adxl345"; reg = <0x53>; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index d309314f3a364fd138e0be2befe4ff30712dee38..5f1769209526a8fdcfb711985a3819d81ce6f364 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -188,7 +188,7 @@ <0xa0410100 0x100>; }; - scu@a04100000 { + scu@a0410000 { compatible = "arm,cortex-a9-scu"; reg = <0xa0410000 0x100>; }; @@ -864,7 +864,7 @@ power-domains = <&pm_domains DOMAIN_VAPE>; }; - ssp@80002000 { + spi@80002000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x80002000 0x1000>; interrupts = ; @@ -878,7 +878,7 @@ power-domains = <&pm_domains DOMAIN_VAPE>; }; - ssp@80003000 { + spi@80003000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x80003000 0x1000>; interrupts = ; diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi index 5c5cea232743d111f154cd0faa0517b44f6a70ab..1ec193b0c5065b794838705702a99bd48c95586c 100644 --- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi +++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi @@ -607,16 +607,20 @@ mcde { lcd_default_mode: lcd_default { - default_mux { + default_mux1 { /* Mux in VSI0 and all the data lines */ function = "lcd"; groups = "lcdvsi0_a_1", /* VSI0 for LCD */ "lcd_d0_d7_a_1", /* Data lines */ "lcd_d8_d11_a_1", /* TV-out */ - "lcdaclk_b_1", /* Clock line for TV-out */ "lcdvsi1_a_1"; /* VSI1 for HDMI */ }; + default_mux2 { + function = "lcda"; + groups = + "lcdaclk_b_1"; /* Clock line for TV-out */ + }; default_cfg1 { pins = "GPIO68_E1", /* VSI0 */ diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi index 
ece222d51717c294385fd2edc8960b1ebd6f471b..cf8d03bc42c1501b705c8d9741e0ce1ab5882214 100644 --- a/arch/arm/boot/dts/ste-hrefprev60.dtsi +++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi @@ -57,7 +57,7 @@ }; }; - ssp@80002000 { + spi@80002000 { /* * On the first generation boards, this SSP/SPI port was connected * to the AB8500. diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 386eee6de2320aa60365095d74378ce11a865f0d..272d36c3d223b64b30b93fed0bc2bc15b1477c12 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -386,7 +386,7 @@ pinctrl-1 = <&i2c3_sleep_mode>; }; - ssp@80002000 { + spi@80002000 { pinctrl-names = "default"; pinctrl-0 = <&ssp0_snowball_mode>; }; diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts index 2f5107ffeef047e4cce0d9925ae6d8fbc3f60079..ea6768b96a9dfcae474b27160ec67a75bf5d9d7b 100644 --- a/arch/arm/boot/dts/ste-u300.dts +++ b/arch/arm/boot/dts/ste-u300.dts @@ -441,7 +441,7 @@ dma-names = "rx"; }; - spi: ssp@c0006000 { + spi: spi@c0006000 { compatible = "arm,pl022", "arm,primecell"; reg = <0xc0006000 0x1000>; interrupt-parent = <&vica>; diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index b4bfa5586c233a48b3aeba14b53c3187e245b753..23d4c837b87a6658debe70cf61592e0620847309 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -521,10 +521,10 @@ gpio-keys { compatible = "gpio-keys"; - power { - label = "Power"; + wakeup { + label = "Wakeup"; gpios = <&gpio TEGRA_GPIO(J, 7) GPIO_ACTIVE_LOW>; - linux,code = ; + linux,code = ; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi index 192b95177aac38017b06b8287f4af087a1e2d643..826bdd0b8a257bdc611fe31dd415661af7c9c697 100644 --- a/arch/arm/boot/dts/tegra30-apalis.dtsi +++ b/arch/arm/boot/dts/tegra30-apalis.dtsi @@ -147,14 +147,14 @@ /* Apalis MMC1 */ sdmmc3_clk_pa6 { - nvidia,pins = "sdmmc3_clk_pa6", - "sdmmc3_cmd_pa7"; + nvidia,pins = "sdmmc3_clk_pa6"; nvidia,function = "sdmmc3"; nvidia,pull = ; nvidia,tristate = ; }; sdmmc3_dat0_pb7 { - nvidia,pins = "sdmmc3_dat0_pb7", + nvidia,pins = "sdmmc3_cmd_pa7", + "sdmmc3_dat0_pb7", "sdmmc3_dat1_pb6", "sdmmc3_dat2_pb5", "sdmmc3_dat3_pb4", diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index 5030065cbdfe38df667fb8cc9e89e4154d5a7798..ad30d2a51af15bf155a08c8762b0d4022ab988fe 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -823,7 +823,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <1>; nvidia,xcvr-lsrslew = <1>; nvidia,xcvr-hsslew = <32>; @@ -860,7 +860,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <2>; nvidia,xcvr-lsrslew = <2>; nvidia,xcvr-hsslew = <32>; @@ -896,7 +896,7 @@ nvidia,elastic-limit = <16>; nvidia,term-range-adj = <6>; nvidia,xcvr-setup = <51>; - nvidia.xcvr-setup-use-fuses; + nvidia,xcvr-setup-use-fuses; nvidia,xcvr-lsfslew = <2>; nvidia,xcvr-lsrslew = <2>; nvidia,xcvr-hsslew = <32>; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 409e069b3a845667df387c1decc7c811aa8359bd..00d7d28e86f0bc6dd2be874a10ab48a7d1bbd6ef 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ 
b/arch/arm/boot/dts/versatile-ab.dts @@ -303,7 +303,7 @@ clock-names = "apb_pclk"; }; - ssp@101f4000 { + spi@101f4000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x101f4000 0x1000>; interrupts = <11>; diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig index d590098783127816c95d2140a5769730780cb78d..067d73e3b28b66ddb1995ff3bf7f135fa52af52f 100644 --- a/arch/arm/configs/badge4_defconfig +++ b/arch/arm/configs/badge4_defconfig @@ -97,7 +97,6 @@ CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_RIO500=m CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m CONFIG_MSDOS_FS=y diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig index c1470a00f55a998f5d7d115d37c1736f82dd9fff..031d9d3549b9da8e31b30e3284509c2097121f8b 100644 --- a/arch/arm/configs/corgi_defconfig +++ b/arch/arm/configs/corgi_defconfig @@ -207,7 +207,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig index 458d60a2b26ea509465c132b9499090b91b9de6e..4ad91f33b4fb7f6698f0c9e8b63cccc4cb7fc208 100755 --- a/arch/arm/configs/msm8909-perf_defconfig +++ b/arch/arm/configs/msm8909-perf_defconfig @@ -400,6 +400,7 @@ CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_QPNP_REVID=y CONFIG_USB_BAM=y +CONFIG_MSM_RMNET_BAM=y CONFIG_MSM_MDSS_PLL=y CONFIG_REMOTE_SPINLOCK_MSM=y CONFIG_MAILBOX=y diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig index 38d48acbffa5f462b04b68710c365652ef8b2b1c..61e623d5ecf50c509e9d83df4ca535d87b4c5aec 100644 --- a/arch/arm/configs/msm8909_defconfig +++ b/arch/arm/configs/msm8909_defconfig @@ -442,6 +442,7 @@ CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_QPNP_REVID=y CONFIG_USB_BAM=y +CONFIG_MSM_RMNET_BAM=y CONFIG_MSM_MDSS_PLL=y CONFIG_REMOTE_SPINLOCK_MSM=y CONFIG_MAILBOX=y diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index a016ecc0084b8b4c0cee559292642255208b8ebf..178ee84dffa18df495140f6a447b79c1ed117db0 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -591,7 +591,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 60d3fecd7a22db7ad87fa9559e66da4447ba261d..dc873d23d60322e31a7bb1d8fe26069f308bb8c9 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -354,7 +354,6 @@ CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/arm/configs/sa415m-perf_defconfig b/arch/arm/configs/sa415m-perf_defconfig index ce243699c51c861decc66bd3a191b79d8a270fdb..ff8219a998f3d512b1f2e5c2bad33f3a38ed6d7d 100644 --- a/arch/arm/configs/sa415m-perf_defconfig +++ b/arch/arm/configs/sa415m-perf_defconfig @@ -185,6 +185,9 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=12 +CONFIG_MHI_BUS=y +CONFIG_MHI_NETDEV=y +CONFIG_MHI_UCI=y CONFIG_MTD=y CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y diff --git a/arch/arm/configs/sa415m_defconfig b/arch/arm/configs/sa415m_defconfig index 
b28dd77e301957c6732a7aae463fd38974d81e03..72f4ea32dd473bcf442d9244b2d5d6f702a57506 100644 --- a/arch/arm/configs/sa415m_defconfig +++ b/arch/arm/configs/sa415m_defconfig @@ -185,6 +185,10 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=12 +CONFIG_MHI_BUS=y +CONFIG_MHI_DEBUG=y +CONFIG_MHI_NETDEV=y +CONFIG_MHI_UCI=y CONFIG_MTD=y CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y diff --git a/arch/arm/configs/sdm429-bg-perf_defconfig b/arch/arm/configs/sdm429-bg-perf_defconfig index a5bd0bd9718a2e60e18393e772ae78b43fecdb46..7bb5cf938108c94503c74ddc601c92c6d86fe70a 100644 --- a/arch/arm/configs/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/sdm429-bg-perf_defconfig @@ -137,13 +137,9 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y CONFIG_NETFILTER_XT_TARGET_CONNMARK=y CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y @@ -153,11 +149,9 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y CONFIG_NETFILTER_XT_MATCH_HELPER=y CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=y CONFIG_NETFILTER_XT_MATCH_LIMIT=y CONFIG_NETFILTER_XT_MATCH_MAC=y @@ -198,13 +192,6 @@ CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y CONFIG_BRIDGE=y CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=y @@ -213,6 +200,7 @@ CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_U32=y CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y CONFIG_NET_EMATCH=y CONFIG_NET_EMATCH_CMP=y CONFIG_NET_EMATCH_NBYTE=y @@ -246,12 +234,9 @@ CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_REQ_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -266,6 +251,7 @@ CONFIG_TUN=y # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PHYLIB=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -273,12 +259,10 @@ CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=y CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y CONFIG_PPPOLAC=y CONFIG_PPPOPNS=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y # CONFIG_WLAN_VENDOR_ADMTEK is not set # CONFIG_WLAN_VENDOR_ATH is not set # CONFIG_WLAN_VENDOR_ATMEL is not set @@ -353,11 +337,10 @@ CONFIG_DEVFREQ_THERMAL=y CONFIG_THERMAL_QPNP=y CONFIG_THERMAL_QPNP_ADC_TM=y CONFIG_THERMAL_TSENS=y +CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y @@ -373,14 +356,10 @@ CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y 
CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_VIDEO_CLASS=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_MSM_VIDC_3X_V4L2=y CONFIG_MSM_VIDC_3X_GOVERNORS=y CONFIG_ADSP_SHMEM=y -CONFIG_RADIO_IRIS=y -CONFIG_RADIO_IRIS_TRANSPORT=y CONFIG_QCOM_KGSL=y CONFIG_FB=y CONFIG_FB_VIRTUAL=y @@ -403,18 +382,7 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y CONFIG_DUAL_ROLE_USB_INTF=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y @@ -424,8 +392,6 @@ CONFIG_USB_CI13XXX_MSM=y CONFIG_USB_CONFIGFS=y CONFIG_USB_CONFIGFS_SERIAL=y CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_RNDIS=y -CONFIG_USB_CONFIGFS_RMNET_BAM=y CONFIG_USB_CONFIGFS_MASS_STORAGE=y CONFIG_USB_CONFIGFS_F_FS=y CONFIG_USB_CONFIGFS_F_MTP=y @@ -438,7 +404,6 @@ CONFIG_USB_CONFIGFS_F_HID=y CONFIG_USB_CONFIGFS_F_DIAG=y CONFIG_USB_CONFIGFS_F_CDEV=y CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y # CONFIG_PWRSEQ_EMMC is not set @@ -451,7 +416,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_SDHCI_MSM_ICE=y -CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -470,9 +434,6 @@ CONFIG_ASHMEM=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y CONFIG_ION=y CONFIG_ION_MSM=y -CONFIG_IPA=y -CONFIG_RMNET_IPA=y -CONFIG_RNDIS_IPA=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_QPNP_REVID=y diff --git a/arch/arm/configs/sdm429-bg_defconfig b/arch/arm/configs/sdm429-bg_defconfig index 4d9fb03cf312cdeb0a92a82e33dee012dedadb28..f0aeb941e349e3390e2a8ab35cea460b1f390fc1 100644 --- a/arch/arm/configs/sdm429-bg_defconfig +++ b/arch/arm/configs/sdm429-bg_defconfig @@ -141,13 +141,9 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y CONFIG_NETFILTER_XT_TARGET_CONNMARK=y CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y @@ -158,11 +154,9 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y CONFIG_NETFILTER_XT_MATCH_HELPER=y CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=y CONFIG_NETFILTER_XT_MATCH_LIMIT=y CONFIG_NETFILTER_XT_MATCH_MAC=y @@ -173,6 +167,7 @@ CONFIG_NETFILTER_XT_MATCH_POLICY=y CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y CONFIG_NETFILTER_XT_MATCH_SOCKET=y CONFIG_NETFILTER_XT_MATCH_STATE=y CONFIG_NETFILTER_XT_MATCH_STATISTIC=y @@ -204,13 +199,6 @@ CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y 
CONFIG_BRIDGE=y CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=y @@ -219,6 +207,7 @@ CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_U32=y CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y CONFIG_NET_EMATCH=y CONFIG_NET_EMATCH_CMP=y CONFIG_NET_EMATCH_NBYTE=y @@ -253,12 +242,9 @@ CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y -CONFIG_DM_REQ_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -273,6 +259,7 @@ CONFIG_TUN=y # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PHYLIB=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -280,12 +267,10 @@ CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=y CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y CONFIG_PPPOLAC=y CONFIG_PPPOPNS=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y # CONFIG_WLAN_VENDOR_ADMTEK is not set # CONFIG_WLAN_VENDOR_ATH is not set # CONFIG_WLAN_VENDOR_ATMEL is not set @@ -362,11 +347,10 @@ CONFIG_DEVFREQ_THERMAL=y CONFIG_THERMAL_QPNP=y CONFIG_THERMAL_QPNP_ADC_TM=y CONFIG_THERMAL_TSENS=y +CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y @@ -382,14 +366,10 @@ CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_VIDEO_CLASS=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_MSM_VIDC_3X_V4L2=y CONFIG_MSM_VIDC_3X_GOVERNORS=y CONFIG_ADSP_SHMEM=y -CONFIG_RADIO_IRIS=y -CONFIG_RADIO_IRIS_TRANSPORT=y CONFIG_QCOM_KGSL=y CONFIG_FB=y CONFIG_FB_VIRTUAL=y @@ -412,18 +392,7 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y CONFIG_DUAL_ROLE_USB_INTF=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y @@ -433,9 +402,6 @@ CONFIG_USB_CI13XXX_MSM=y CONFIG_USB_CONFIGFS=y CONFIG_USB_CONFIGFS_SERIAL=y CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_QCRNDIS=y -CONFIG_USB_CONFIGFS_RNDIS=y -CONFIG_USB_CONFIGFS_RMNET_BAM=y CONFIG_USB_CONFIGFS_MASS_STORAGE=y CONFIG_USB_CONFIGFS_F_FS=y CONFIG_USB_CONFIGFS_F_MTP=y @@ -448,7 +414,6 @@ CONFIG_USB_CONFIGFS_F_HID=y CONFIG_USB_CONFIGFS_F_DIAG=y CONFIG_USB_CONFIGFS_F_CDEV=y CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y # CONFIG_PWRSEQ_EMMC is not set @@ -462,7 +427,6 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_SDHCI_MSM_ICE=y -CONFIG_MMC_CQ_HCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_QPNP_HAPTICS=y @@ -481,9 +445,6 @@ CONFIG_ASHMEM=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y CONFIG_ION=y CONFIG_ION_MSM=y -CONFIG_IPA=y -CONFIG_RMNET_IPA=y -CONFIG_RNDIS_IPA=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_QPNP_REVID=y @@ -504,6 +465,7 @@ CONFIG_MSM_L2_SPM=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QPNP_PBS=y CONFIG_QCOM_MEMORY_DUMP_V2=y CONFIG_MSM_DEBUG_LAR_UNLOCK=y CONFIG_MSM_RPM_SMD=y @@ -525,7 +487,6 @@ CONFIG_MSM_SYSMON_COMM=y 
CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y -CONFIG_ICNSS=y CONFIG_MSM_PERFORMANCE=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_AVTIMER=y diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index a1ede1966bafdd1b58fce83c839b582577a4bef5..7d9aa284cb6f4cbe4137ca00984109de0690fd4f 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig @@ -202,7 +202,6 @@ CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d1df9cc2a2598a7047904a9cedcbdc4fe884872e..b0e817576af150395e0225f8a7677706f98dabf5 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -385,6 +385,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writesw(p,d,l) __raw_writesw(p,d,l) #define writesl(p,d,l) __raw_writesl(p,d,l) +#define readb_no_log(c) \ + ({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; }) +#define readw_no_log(c) \ + ({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; }) +#define readl_no_log(c) \ + ({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; }) +#define readq_no_log(c) \ + ({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; }) + #ifndef __ARMBE__ static inline void memset_io(volatile void __iomem *dst, unsigned c, size_t count) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 9d2b9426b1555692f6c53f73e4d3ed779e3ffa83..8d6c75167822cc53e726c3098fef69d31bb3e788 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -273,16 +273,15 @@ __sys_trace: cmp scno, #-1 @ skip the syscall? bne 2b add sp, sp, #S_OFF @ restore stack - b ret_slow_syscall -__sys_trace_return: - str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 +__sys_trace_return_nosave: + enable_irq_notrace mov r0, sp bl syscall_trace_exit b ret_slow_syscall -__sys_trace_return_nosave: - enable_irq_notrace +__sys_trace_return: + str r0, [sp, #S_R0 + S_OFF]! 
@ save returned r0 mov r0, sp bl syscall_trace_exit b ret_slow_syscall diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index b3d268a79f05771d47d148bddd122cfbb23e7f53..bb0d5e21d60bda0ded30c3b0ec2b273a2fe81abf 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -366,7 +366,8 @@ static void stage2_flush_memslot(struct kvm *kvm, pgd = kvm->arch.pgd + stage2_pgd_index(addr); do { next = stage2_pgd_addr_end(addr, end); - stage2_flush_puds(kvm, pgd, addr, next); + if (!stage2_pgd_none(*pgd)) + stage2_flush_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index ef3add9992631f3ad9dbc6388fc9a7da79940f3c..8db549c56914dd77331ad3040a0e0af87fa08cec 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -864,8 +864,8 @@ static s8 dm365_queue_priority_mapping[][2] = { }; static const struct dma_slave_map dm365_edma_map[] = { - { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) }, - { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) }, + { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) }, + { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) }, { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) }, { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) }, { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) }, diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 1515e498d348c6ea1636149e35665b4fd2e2b239..dd9eb3f14f45c93f60a7bcf509d094b61bf72741 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -602,6 +602,28 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata IMX6Q_GPR1_GINT); } +static void imx6_pm_stby_poweroff(void) +{ + imx6_set_lpm(STOP_POWER_OFF); + imx6q_suspend_finish(0); + + mdelay(1000); + + pr_emerg("Unable to poweroff system\n"); +} + +static int imx6_pm_stby_poweroff_probe(void) +{ + if (pm_power_off) { + pr_warn("%s: pm_power_off already claimed %p %pf!\n", + __func__, pm_power_off, pm_power_off); + return -EBUSY; + } + + pm_power_off = imx6_pm_stby_poweroff; + return 0; +} + void __init imx6_pm_ccm_init(const char *ccm_compat) { struct device_node *np; @@ -618,6 +640,9 @@ void __init imx6_pm_ccm_init(const char *ccm_compat) val = readl_relaxed(ccm_base + CLPCR); val &= ~BM_CLPCR_LPM; writel_relaxed(val, ccm_base + CLPCR); + + if (of_property_read_bool(np, "fsl,pmic-stby-poweroff")) + imx6_pm_stby_poweroff_probe(); } void __init imx6q_pm_init(void) diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index cf65ab8bb0046f69d3bc244cc4636f2ead54a8b8..e5dcbda20129dab7a723154a18885fb3ee6c1b44 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -131,6 +131,9 @@ static int __init omap4_sram_init(void) struct device_node *np; struct gen_pool *sram_pool; + if (!soc_is_omap44xx() && !soc_is_omap54xx()) + return 0; + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); if (!np) pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index e2d84aa7f595ff3fa8672a3c6f87412a13b3e095..fa1c6707877a7b32f4c8397992eac0e175ae5cf3 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -939,7 +939,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, 
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 1ab7096af8e23c239c247f4bca8a39d66b955a9f..f850fc3a91e82bcfaa0282d240271e6a90f55be4 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -387,7 +387,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = { static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { .rev_offs = 0x0, .sysc_offs = 0x4, - .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type2, }; diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c index 7cd9865bdeb7bed3c46c575fb5fea5a022ac251b..94929eb707f07c8f91d685125d13ff7aac8807ed 100644 --- a/arch/arm/mach-zynq/platsmp.c +++ b/arch/arm/mach-zynq/platsmp.c @@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu) * 0x4: Jump by mov instruction * 0x8: Jumping address */ - memcpy((__force void *)zero, &zynq_secondary_trampoline, + memcpy_toio(zero, &zynq_secondary_trampoline, trampoline_size); writel(address, zero + trampoline_size); diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 7d5f4c736a16b4c1f514d0c3ce768ede43f4cef1..cd18eda014c2420b3787da4ded9875e3e0927e2b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -767,6 +767,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, return NULL; } +static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst) +{ + u32 instr = 0; + int fault; + + if (user_mode(regs)) + fault = get_user(instr, ip); + else + fault = probe_kernel_address(ip, instr); + + *inst = __mem_to_opcode_arm(instr); + + return fault; +} + +static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst) +{ + u16 instr = 0; + int fault; + + if (user_mode(regs)) + fault = get_user(instr, ip); + else + fault = probe_kernel_address(ip, instr); + + *inst = __mem_to_opcode_thumb16(instr); + + return fault; +} + static int do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -774,10 +804,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; - unsigned int fault; u16 tinstr = 0; int isize = 4; int thumb2_32b = 0; + int fault; if (interrupts_enabled(regs)) local_irq_enable(); @@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (thumb_mode(regs)) { u16 *ptr = (u16 *)(instrptr & ~1); - fault = probe_kernel_address(ptr, tinstr); - tinstr = __mem_to_opcode_thumb16(tinstr); + + fault = alignment_get_thumb(regs, ptr, &tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ - u16 tinst2 = 0; - fault = probe_kernel_address(ptr + 1, tinst2); - tinst2 = __mem_to_opcode_thumb16(tinst2); + u16 tinst2; + fault = alignment_get_thumb(regs, ptr + 1, &tinst2); instr = __opcode_thumb32_compose(tinstr, tinst2); thumb2_32b = 1; } else { @@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } 
} else { - fault = probe_kernel_address((void *)instrptr, instr); - instr = __mem_to_opcode_arm(instr); + fault = alignment_get_arm(regs, (void *)instrptr, &instr); } if (fault) { diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index d6c9dee1b252c5b8d61d500d1707cb2b13ba2f82..9453c92f9f8f3b1eb99bd13a89e229b86ee9d0fd 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -214,7 +214,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) { unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; - if (fsr & FSR_WRITE) + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) mask = VM_WRITE; if (fsr & FSR_LNX_PF) mask = VM_EXEC; @@ -284,7 +284,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if (fsr & FSR_WRITE) + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) flags |= FAULT_FLAG_WRITE; /* diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index afc1f84e763b248b2193715e757d432cc055eac8..9bc272642d55a79885e97383011faa62e21c303a 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -5,6 +5,7 @@ * Fault status register encodings. We steal bit 31 for our own purposes. */ #define FSR_LNX_PF (1 << 31) +#define FSR_CM (1 << 13) #define FSR_WRITE (1 << 11) #define FSR_FS4 (1 << 10) #define FSR_FS3_0 (15) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index efbc7d50b0e829e62e222f72d1416c3ce8f2f227..1dee9630714d7950e25858ec58c92bef6135205e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -805,7 +805,8 @@ static void update_sections_early(struct section_perm perms[], int n) if (t->flags & PF_KTHREAD) continue; for_each_thread(t, s) - set_section_perms(perms, n, true, s->mm); + if (s->mm) + set_section_perms(perms, n, true, s->mm); } read_unlock(&tasklist_lock); set_section_perms(perms, n, true, current->active_mm); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5d06273db37912bc884a1c65cf780cf22da542c0..f8987066eba17f34251739eed2da5a0fc2a17001 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1168,10 +1168,29 @@ void __init adjust_lowmem_bounds(void) */ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; + /* + * The first usable region must be PMD aligned. Mark its start + * as MEMBLOCK_NOMAP if it isn't + */ + for_each_memblock(memory, reg) { + if (!memblock_is_nomap(reg)) { + if (!IS_ALIGNED(reg->base, PMD_SIZE)) { + phys_addr_t len; + + len = round_up(reg->base, PMD_SIZE) - reg->base; + memblock_mark_nomap(reg->base, len); + } + break; + } + } + for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; + if (memblock_is_nomap(reg)) + continue; + if (reg->base < vmalloc_limit) { if (block_end > lowmem_limit) /* diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index b79f592b0203561282c63f17fe8654e415a3eda9..5b95ee8742dee219da3168f48d2d08319736dd92 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -175,6 +175,7 @@ archclean: $(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=$(boot)/dts +ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. 
# In order to do that, we should use the archprepare target, but we can't since # asm-offsets.h is included in some files used to generate vdso-offsets.h, and @@ -184,6 +185,7 @@ archclean: prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h +endif define archhelp echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi index bd3adeac374f4d230d74470aaf58c1be839e809a..2973a14523eaf5ab5577f680ffc2a454ba679818 100644 --- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi +++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi @@ -106,7 +106,7 @@ clock-names = "uartclk", "apb_pclk"; }; - spi0: ssp@e1020000 { + spi0: spi@e1020000 { status = "disabled"; compatible = "arm,pl022", "arm,primecell"; reg = <0 0xe1020000 0 0x1000>; @@ -116,7 +116,7 @@ clock-names = "apb_pclk"; }; - spi1: ssp@e1030000 { + spi1: spi@e1030000 { status = "disabled"; compatible = "arm,pl022", "arm,primecell"; reg = <0 0xe1030000 0 0x1000>; diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi index fbafa24cd5335b90de29af9a3f3a731bc616ac10..5e0c5dc973e3383fcca7ccd4a07605f2e839e87b 100644 --- a/arch/arm64/boot/dts/lg/lg1312.dtsi +++ b/arch/arm64/boot/dts/lg/lg1312.dtsi @@ -167,14 +167,14 @@ clock-names = "apb_pclk"; status="disabled"; }; - spi0: ssp@fe800000 { + spi0: spi@fe800000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe800000 0x1000>; interrupts = ; clocks = <&clk_bus>; clock-names = "apb_pclk"; }; - spi1: ssp@fe900000 { + spi1: spi@fe900000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe900000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi index e703e1149c7570829201538d713c84688d72c180..f3b1ba6f74220a8794984bd27750674742f894a6 100644 --- a/arch/arm64/boot/dts/lg/lg1313.dtsi +++ b/arch/arm64/boot/dts/lg/lg1313.dtsi @@ -167,14 +167,14 @@ clock-names = "apb_pclk"; status="disabled"; }; - spi0: ssp@fe800000 { + spi0: spi@fe800000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe800000 0x1000>; interrupts = ; clocks = <&clk_bus>; clock-names = "apb_pclk"; }; - spi1: ssp@fe900000 { + spi1: spi@fe900000 { compatible = "arm,pl022", "arm,primecell"; reg = <0x0 0xfe900000 0x1000>; interrupts = ; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi index 6a51d282ec6366f5d42dab12401dfa85d58840dd..d1e687b4911f5c056ad4c7d8d88e3d7c5319c20f 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi @@ -281,6 +281,7 @@ status = "okay"; bus-width = <8>; non-removable; + vqmmc-supply = <&vdd_1v8>; }; clocks { diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 9da89fd34c12eec0086ed7d3ceca1726dbbac887..eed610cc3cd225af24543c09846467c2028702fe 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -473,7 +473,6 @@ msm8953-ext-codec-mtp-overlay.dtbo-base := msm8953.dtb \ msm8953-ext-codec-rcm-overlay.dtbo-base := msm8953.dtb \ apq8053.dtb msm8953-cdp-1200p-overlay.dtbo-base := msm8953.dtb - sdm450-cdp-s2-overlay.dtbo-base := sdm450-pmi632.dtb \ sdm632.dtb \ sdm632-pm8004.dtb \ @@ -486,7 +485,6 @@ sdm450-mtp-s3-overlay.dtbo-base := sdm450-pmi632.dtb \ sdm450-qrd-sku4-overlay.dtbo-base := sdm450-pmi632.dtb \ sdm632.dtb \ sdm632-pm8004.dtb - 
sdm632-rumi-overlay.dtbo-base := sdm632.dtb sdm632-ext-codec-cdp-s3-overlay.dtbo-base := sdm632.dtb \ sdm632-pm8004.dtb @@ -557,8 +555,7 @@ dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \ msm8953-pmi8940-ext-codec-mtp.dtb \ msm8953-pmi8937-ext-codec-mtp.dtb \ msm8953-pmi632-cdp-s2.dtb \ - apq8053-batcam.dtb \ - msm8953-no-pmi.dtb + apq8053-batcam.dtb dtb-$(CONFIG_ARCH_MSM8937) += msm8937-pmi8950-mtp.dtb \ msm8937-interposer-sdm439-cdp.dtb \ @@ -622,8 +619,7 @@ dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \ sdm450-pmi632-cdp-s2.dtb \ sdm450-pmi632-mtp-s3.dtb \ sda450-pmi632-cdp-s2.dtb \ - sda450-pmi632-mtp-s3.dtb \ - sdm450-no-pmi-mtp.dtb + sda450-pmi632-mtp-s3.dtb dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \ sdm632-cdp-s2.dtb \ diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-auo-416p-amoled-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-auo-416p-amoled-cmd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..44b578a7718a6f3b7bb182ff890686eedc8d099c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/dsi-panel-auo-416p-amoled-cmd.dtsi @@ -0,0 +1,121 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_auo_416p_amoled_cmd: qcom,mdss_dsi_auo_416p_amoled_cmd { + qcom,mdss-dsi-panel-name = "AUO 416p AMOLED command mode dsi panel"; + qcom,mdss-dsi-panel-controller = <&mdss_dsi0>; + qcom,mdss-dsi-panel-type = "dsi_cmd_mode"; + qcom,mdss-dsi-panel-destination = "display_1"; + qcom,mdss-dsi-panel-framerate = <45>; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-panel-width = <416>; + qcom,mdss-dsi-panel-height = <416>; + qcom,mdss-dsi-h-front-porch = <4>; + qcom,mdss-dsi-h-back-porch = <6>; + qcom,mdss-dsi-h-pulse-width = <4>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <1>; + qcom,mdss-dsi-v-front-porch = <1>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-color-order = "rgb_swap_rgb"; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-pixel-packing = "tight"; + qcom,mdss-dsi-pixel-alignment = <0>; + qcom,mdss-dsi-on-command = [ + 05 01 00 00 01 00 02 00 00 + 15 01 00 00 00 00 02 FE 01 + 15 01 00 00 00 00 02 0A F0 + 15 01 00 00 00 00 02 FE 00 + 29 01 00 00 00 00 05 2A 00 1E 01 BD + 29 01 00 00 00 00 05 2B 00 00 01 9F + 29 01 00 00 00 00 05 30 00 00 01 9F + 29 01 00 00 00 00 05 31 00 1E 01 BD + 05 01 00 00 00 00 02 12 00 + 15 01 00 00 00 00 02 35 02 + 15 01 00 00 00 00 02 53 20 + 15 01 00 00 00 00 02 51 FF + 15 01 00 00 28 00 02 66 00 + 15 01 00 00 00 00 02 63 FF + 05 01 00 00 32 00 02 11 00 + 05 01 00 00 1E 00 02 29 00 + ]; + /*15 01 00 00 00 00 02 51 00*/ + qcom,mdss-dsi-off-command = [ + 05 01 00 00 00 00 02 28 00 + 05 01 00 00 78 00 02 10 00 + ]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-idle-on-command = [ + 15 01 00 00 46 
00 02 FE 00 + 05 01 00 00 00 00 01 39 + ]; + qcom,mdss-dsi-idle-on-command-state = "dsi_hs_mode"; + /*TODO: will remove set colum/row address*/ + qcom,mdss-dsi-idle-off-command = [ + 05 01 00 00 1f 00 01 38 /* Idle-Mode Off */ + 15 01 00 00 00 00 02 FE 00 + 29 01 00 00 00 00 05 2A 00 1E 01 BD + 29 01 00 00 00 00 05 2B 00 00 01 9F + 29 01 00 00 00 00 05 30 00 00 01 9F + 29 01 00 00 00 00 05 31 00 1E 01 BD + 05 01 00 00 00 00 02 12 00 + ]; + + qcom,mdss-dsi-hbm0-on-command = [ + 15 01 00 00 00 00 02 FE 01 + 15 01 00 00 00 00 02 11 93 + 15 01 00 00 00 00 02 FE 00 + 15 01 00 00 00 00 02 66 02 + ]; + qcom,mdss-dsi-hbm-off-command = [ + 15 01 00 00 00 00 02 FE 00 + 15 01 00 00 28 00 02 66 00 + 15 01 00 00 00 00 02 FE 01 + 15 01 00 00 00 00 02 11 80 + 15 01 00 00 00 00 02 FE 00 + ]; + qcom,mdss-dsi-h-sync-pulse = <1>; + qcom,mdss-dsi-traffic-mode = "burst_mode"; + qcom,mdss-dsi-lane-map = "lane_map_0123"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-tear-check-frame-rate = <4500>; + qcom,mdss-dsi-idle-fps = <15>; + /* clk = totlaH * totalV * bpp* 60fps */ + qcom,mdss-dsi-panel-clockrate = <308655360>; + qcom,mdss-dsi-te-pin-select = <1>; + qcom,mdss-dsi-te-dcs-command = <1>; + qcom,mdss-dsi-te-using-te-pin; + qcom,mdss-dsi-te-check-enable; + qcom,mdss-dsi-panel-timings = [ + 4E 0E 08 00 2E 30 0E 12 0A 03 04 00 + ]; + qcom,mdss-dsi-t-clk-post = <0x05>; + qcom,mdss-dsi-t-clk-pre = <0x12>; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <255>; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts index 7f5fc4e4c678d4f64c282567ae4f3ba2c594c204..c8930b9fbbe0fd60f24b63ec3db5bae35f1b04c7 100644 --- a/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,3 +20,74 @@ model = "QRD SKU3"; qcom,board-id = <0xb 0>; }; + +&int_codec { + status = "disabled"; +}; + +&pmic_analog_codec { + status = "disabled"; +}; + +&cdc_pri_mi2s_gpios { + status = "disabled"; +}; + +&cdc_comp_gpios { + status = "disabled"; +}; + +&slim_msm { + status = "okay"; +}; + +&dai_slim { + status = "okay"; +}; + +&wcd9xxx_intc { + status = "okay"; +}; + +&clock_audio { + status = "okay"; +}; + +&wcd9335 { + status = "okay"; +}; + +&cdc_us_euro_sw { + status = "okay"; +}; + +&wcd_rst_gpio { + status = "okay"; +}; + +&ext_codec { + qcom,model = "msm8953-sku3-tasha-snd-card"; + status = "okay"; + + qcom,audio-routing = + "AIF4 VI", "MCLK", + "RX_BIAS", "MCLK", + "DMIC0", "MIC BIAS1", + "MIC BIAS1", "Digital Mic0", + "AMIC2", "MIC BIAS2", + "MIC BIAS2", "Headset Mic", + "DMIC2", "MIC BIAS3", + "MIC BIAS3", "Digital Mic2", + "MIC BIAS1", "MICBIAS_REGULATOR", + "MIC BIAS2", "MICBIAS_REGULATOR", + "MIC BIAS3", "MICBIAS_REGULATOR", + "SpkrLeft IN", "SPK1 OUT"; + + qcom,cdc-us-euro-gpios; + + qcom,msm-mbhc-hphl-swh = <1>; + + qcom,wsa-max-devs = <1>; + qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft"; +}; diff --git a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi index 010694d02938b022cd51b567597d3dcea3725385..58966151f247e1ea5c870647603181f31083f166 100644 --- a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi @@ -112,7 +112,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <1>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l1 { @@ -128,7 +128,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <2>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l2 { @@ -144,7 +144,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <3>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l3 { @@ -160,7 +160,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <5>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l5 { @@ -176,7 +176,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <6>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l6 { @@ -192,7 +192,7 @@ qcom,resource-name = "ldoa"; qcom,resource-id = <7>; qcom,regulator-type = <0>; - qcom,hpm-min-load = <10000>; + qcom,hpm-min-load = <30000>; status = "disabled"; regulator-l7 { diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi index ed6ca20db1da74a2d82a91946994f6776776fa8c..3f3c8da6378e8e9ce5c23d04e97941e35fdbe0c8 100644 --- a/arch/arm64/boot/dts/qcom/qcs605.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi @@ -18,52 +18,40 @@ qcom,msm-id = <347 0x0>; }; -&removed_region { - reg = <0 0x85fc0000 0 0x1540000>; -}; - -&pil_camera_mem { - reg = <0 0x8b800000 0 0x500000>; -}; - &pil_modem_mem { - reg = <0 0x8bd00000 0 0x3100000>; + reg = <0 0x8b000000 0 0x3100000>; }; &pil_video_mem { - reg = <0 0x8ee00000 0 0x500000>; + reg = <0 0x8e100000 0 0x500000>; }; &wlan_msa_mem { - reg = <0 0x8f300000 0 0x100000>; + reg = <0 0x8e600000 0 0x100000>; }; 
&pil_cdsp_mem { - reg = <0 0x8f400000 0 0x800000>; + reg = <0 0x8e700000 0 0x800000>; }; &pil_mba_mem { - reg = <0 0x8fc00000 0 0x200000>; + reg = <0 0x8ef00000 0 0x200000>; }; &pil_adsp_mem { - reg = <0 0x8fe00000 0 0x1e00000>; + reg = <0 0x8f100000 0 0x1e00000>; }; &pil_ipa_fw_mem { - reg = <0 0x91c00000 0 0x10000>; + reg = <0 0x90f00000 0 0x10000>; }; &pil_ipa_gsi_mem { - reg = <0 0x91c10000 0 0x5000>; + reg = <0 0x90f10000 0 0x5000>; }; &pil_gpu_mem { - reg = <0 0x91c15000 0 0x2000>; -}; - -&qseecom_mem { - reg = <0 0x9e800000 0 0x1000000>; + reg = <0 0x90f15000 0 0x2000>; }; &adsp_mem { @@ -71,7 +59,7 @@ }; &secure_display_memory { - status = "disabled"; + size = <0 0xa000000>; }; &qcom_seecom { diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts index f895f178a45b88a02c0e675c4bb01b5864da742e..0ddd81d03c0215e7c65cf11ce835815cad60a501 100644 --- a/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts @@ -24,6 +24,118 @@ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>; }; +&soc { + + qcom,blackghost { + compatible = "qcom,pil-blackghost"; + qcom,pil-force-shutdown; + qcom,firmware-name = "bg-wear"; + /* GPIO inputs from blackghost */ + qcom,bg2ap-status-gpio = <&tlmm 44 0>; + qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>; + /* GPIO output to blackghost */ + qcom,ap2bg-status-gpio = <&tlmm 61 0>; + qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>; + }; + + qcom,msm-ssc-sensors { + compatible = "qcom,msm-ssc-sensors"; + }; + + qcom,glink-bgcom-xprt-bg { + compatible = "qcom,glink-bgcom-xprt"; + label = "bg"; + qcom,qos-config = <&glink_qos_bg>; + qcom,ramp-time = <0x10>, + <0x20>, + <0x30>, + <0x40>; + }; + + glink_qos_bg: qcom,glink-qos-config-bg { + compatible = "qcom,glink-qos-config"; + qcom,flow-info = <0x80 0x0>, + <0x70 0x1>, + <0x60 0x2>, + <0x50 0x3>; + qcom,mtu-size = <0x800>; + qcom,tput-stats-cycle = <0xa>; + }; + + qcom,glink_pkt { + compatible = "qcom,glinkpkt"; + + qcom,glinkpkt-bg-daemon { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "bg-daemon"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon"; + }; + + qcom,glinkpkt-bg-display-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "display-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl"; + }; + + qcom,glinkpkt-bg-display-data { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "display-data"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data"; + }; + + qcom,glinkpkt-bg-rsb-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "RSB_CTRL"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl"; + }; + + qcom,glinkpkt-bg-sso-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "sso-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl"; + }; + + qcom,glinkpkt-bg-buzzer-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "buzzer-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl"; + }; + }; + + spi_3: spi@78b7000 { /* BLSP1 QUP3*/ + status = "ok"; + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; + spi-max-frequency = <16000000>; + interrupt-parent = <&tlmm>; + qcom,irq-gpio = <&tlmm 43 1>; + }; + }; + + i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */ + status = "disabled"; + }; + + qcom,bg-rsb { + compatible = "qcom,bg-rsb"; + 
vdd-ldo1-supply = <&pm660_l11>; + }; + + qcom,bg-daemon { + compatible = "qcom,bg-daemon"; + qcom,bg-reset-gpio = <&pm660_gpios 5 0>; + ssr-reg1-supply = <&pm660_l3>; + ssr-reg2-supply = <&pm660_l9>; + }; +}; + &usb_otg { HSUSB_3p3-supply = <&L16A>; }; diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts index af32f0c7d216bb90a4c1d1c4c8315fc224a4181a..9746c0c6140305278780f5f9dc37e89b8ff61a70 100644 --- a/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts +++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts @@ -14,6 +14,7 @@ /dts-v1/; #include "sdm429-spyro.dtsi" +#include "sdm429w-bg-pm660.dtsi" / { model = "Qualcomm Technologies, Inc. SDA429 QRD BG WDP Spyro"; diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts index d12b34678fdddd6366973257a0fa968830329df4..8e98ded14eecf30de6fc864591bae5673faf85d9 100644 --- a/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -31,11 +31,11 @@ qcom,pil-force-shutdown; qcom,firmware-name = "bg-wear"; /* GPIO inputs from blackghost */ - qcom,bg2ap-status-gpio = <&msm_gpio 44 0>; - qcom,bg2ap-errfatal-gpio = <&msm_gpio 72 0>; + qcom,bg2ap-status-gpio = <&tlmm 44 0>; + qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>; /* GPIO output to blackghost */ - qcom,ap2bg-status-gpio = <&msm_gpio 61 0>; - qcom,ap2bg-errfatal-gpio = <&msm_gpio 62 0>; + qcom,ap2bg-status-gpio = <&tlmm 61 0>; + qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>; }; qcom,msm-ssc-sensors { @@ -108,21 +108,24 @@ }; }; - spi@78B8000 { /* BLSP1 QUP4 */ + spi_3: spi@78b7000 { /* BLSP1 QUP3*/ status = "ok"; qcom,bg-spi { compatible = "qcom,bg-spi"; reg = <0>; spi-max-frequency = <16000000>; - interrupt-parent = <&msm_gpio>; - qcom,irq-gpio = <&msm_gpio 43 1>; + interrupt-parent = <&tlmm>; + qcom,irq-gpio = <&tlmm 43 1>; }; }; + i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */ + status = "disabled"; + }; + qcom,bg-rsb { compatible = "qcom,bg-rsb"; vdd-ldo1-supply = <&pm660_l11>; - vdd-ldo2-supply = <&pm660_l15>; }; qcom,bg-daemon { @@ -133,6 +136,17 @@ }; }; +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_auo_416p_amoled_cmd>; + /delete-property/ vdd-supply; + vddio-supply = <&L11A>; + qcom,platform-enable-gpio = <&pm660_gpios 12 0>; +}; + +&dsi_pm660_panel_pwr_supply { + /delete-node/ qcom,panel-supply-entry@0; +}; + &usb_otg { HSUSB_3p3-supply = <&L16A>; }; diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts index e1c39182235dbe34263b49700511e947e669fb1e..2312951b6f758c3bd57eaee4874570ba43e5d992 100644 --- a/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts +++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts @@ -14,6 +14,7 @@ /dts-v1/; #include "sdm429-spyro.dtsi" +#include "sdm429w-bg-pm660.dtsi" / { model = "Qualcomm Technologies, Inc. 
SDM429 QRD DVT Spyro"; diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts index 2b820b6150523ef1c3d14bb3d2e20ecf76692b1f..46c7469989971d51caed92c56a13cfaa1496acd5 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts @@ -15,6 +15,8 @@ #include "sdm429-spyro.dtsi" #include "sdm429-spyro-qrd-evt.dtsi" +#include "sdm429w-bg-pm660.dtsi" + / { model = "Qualcomm Technologies, Inc. SDM429W BG IOT WTP"; compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd"; diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts index 53229a07f6733ff4d89d71e69405cd9ecdfa0e83..d78655b180764f969d214a417c5087e3c2bdaccc 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts @@ -31,11 +31,11 @@ qcom,pil-force-shutdown; qcom,firmware-name = "bg-wear"; /* GPIO inputs from blackghost */ - qcom,bg2ap-status-gpio = <&msm_gpio 44 0>; - qcom,bg2ap-errfatal-gpio = <&msm_gpio 72 0>; + qcom,bg2ap-status-gpio = <&tlmm 44 0>; + qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>; /* GPIO output to blackghost */ - qcom,ap2bg-status-gpio = <&msm_gpio 61 0>; - qcom,ap2bg-errfatal-gpio = <&msm_gpio 62 0>; + qcom,ap2bg-status-gpio = <&tlmm 61 0>; + qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>; }; qcom,msm-ssc-sensors { @@ -108,21 +108,24 @@ }; }; - spi@78B8000 { /* BLSP1 QUP4 */ + spi_3: spi@78b7000 { /*BLSP1 QUP3*/ status = "ok"; qcom,bg-spi { compatible = "qcom,bg-spi"; reg = <0>; spi-max-frequency = <16000000>; - interrupt-parent = <&msm_gpio>; - qcom,irq-gpio = <&msm_gpio 43 1>; + interrupt-parent = <&tlmm>; + qcom,irq-gpio = <&tlmm 43 1>; }; }; + i2c_3: i2c@78b8000 { /* BLSP1 QUP3 */ + status = "disabled"; + }; + qcom,bg-rsb { compatible = "qcom,bg-rsb"; vdd-ldo1-supply = <&pm660_l11>; - vdd-ldo2-supply = <&pm660_l15>; }; qcom,bg-daemon { diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts index 74159c4c7795532e5a0aa052bd4a1bbc26ba4ce2..26ed8a7d66bf25389e32ace4129b7f3b9eef8ab2 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts +++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts @@ -14,6 +14,7 @@ /dts-v1/; #include "sdm429-spyro.dtsi" +#include "sdm429w-bg-pm660.dtsi" / { model = "Qualcomm Technologies, Inc. SDM429 QRD BG WDP"; diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts index 96c2385a23d9b10797dc8f1f623c734bf2bb1e56..10b5f48482eae234a2e556df41727726e072a5d9 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,129 @@ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>; }; +&soc { + + qcom,blackghost { + compatible = "qcom,pil-blackghost"; + qcom,pil-force-shutdown; + qcom,firmware-name = "bg-wear"; + /* GPIO inputs from blackghost */ + qcom,bg2ap-status-gpio = <&tlmm 44 0>; + qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>; + /* GPIO output to blackghost */ + qcom,ap2bg-status-gpio = <&tlmm 61 0>; + qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>; + }; + + qcom,msm-ssc-sensors { + compatible = "qcom,msm-ssc-sensors"; + }; + + qcom,glink-bgcom-xprt-bg { + compatible = "qcom,glink-bgcom-xprt"; + label = "bg"; + qcom,qos-config = <&glink_qos_bg>; + qcom,ramp-time = <0x10>, + <0x20>, + <0x30>, + <0x40>; + }; + + glink_qos_bg: qcom,glink-qos-config-bg { + compatible = "qcom,glink-qos-config"; + qcom,flow-info = <0x80 0x0>, + <0x70 0x1>, + <0x60 0x2>, + <0x50 0x3>; + qcom,mtu-size = <0x800>; + qcom,tput-stats-cycle = <0xa>; + }; + + qcom,glink_pkt { + compatible = "qcom,glinkpkt"; + + qcom,glinkpkt-bg-daemon { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "bg-daemon"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon"; + }; + + qcom,glinkpkt-bg-display-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "display-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl"; + }; + + qcom,glinkpkt-bg-display-data { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "display-data"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data"; + }; + + qcom,glinkpkt-bg-rsb-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "RSB_CTRL"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl"; + }; + + qcom,glinkpkt-bg-sso-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "sso-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl"; + }; + + qcom,glinkpkt-bg-buzzer-ctrl { + qcom,glinkpkt-transport = "bgcom"; + qcom,glinkpkt-edge = "bg"; + qcom,glinkpkt-ch-name = "buzzer-ctrl"; + qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl"; + }; + }; + + spi_3: spi@78b7000 { /* BLSP1 QUP3*/ + status = "ok"; + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; + spi-max-frequency = <16000000>; + interrupt-parent = <&tlmm>; + qcom,irq-gpio = <&tlmm 43 1>; + }; + }; + + i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */ + status = "disabled"; + }; + + qcom,bg-rsb { + compatible = "qcom,bg-rsb"; + vdd-ldo1-supply = <&pm660_l11>; + }; + + qcom,bg-daemon { + compatible = "qcom,bg-daemon"; + qcom,bg-reset-gpio = <&pm660_gpios 5 0>; + ssr-reg1-supply = <&pm660_l3>; + ssr-reg2-supply = <&pm660_l9>; + }; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_auo_416p_amoled_cmd>; + /delete-property/ vdd-supply; + vddio-supply = <&L11A>; + qcom,platform-enable-gpio = <&pm660_gpios 12 0>; +}; + +&dsi_pm660_panel_pwr_supply { + /delete-node/ qcom,panel-supply-entry@0; +}; + &usb_otg { HSUSB_3p3-supply = <&L16A>; }; diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts index 4caa7fee8f54b139ddfe8dbb6d5edae35a12bf21..62294f04eb3aae23e7ecdbbd46a685d201bddf1d 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts @@ -14,6 +14,7 @@ /dts-v1/; #include "sdm429-spyro.dtsi" +#include "sdm429w-bg-pm660.dtsi" / { 
model = "Qualcomm Technologies, Inc. SDM429 BG WTP"; diff --git a/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-mdss-panels.dtsi index 6d78ede7d178827a6f62d0a781c043818f24e89a..cc06f8ab87f8ad017a2cb947ac156ce5d46a30e9 100644 --- a/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-mdss-panels.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-mdss-panels.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,9 +10,9 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - #include "dsi-panel-edo-rm67162-qvga-cmd.dtsi" #include "dsi-panel-truly-rm69090-qvga-cmd.dtsi" +#include "dsi-panel-auo-416p-amoled-cmd.dtsi" &soc { dsi_pm660_panel_pwr_supply: dsi_pm660_panel_pwr_supply { @@ -100,3 +100,13 @@ qcom,esd-check-enabled; qcom,mdss-dsi-panel-status-check-mode = "te_signal_check"; }; + +&dsi_auo_416p_amoled_cmd { + /delete-property/ qcom,mdss-dsi-panel-timings; + qcom,mdss-dsi-panel-timings-phy-12nm = [06 05 01 0A 00 03 01 0F]; + qcom,panel-supply-entries = <&dsi_pm660_panel_pwr_supply>; + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "te_signal_check"; + qcom,mdss-dsi-power-off-disable; + qcom,mdss-dsi-tear-disable; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi similarity index 70% rename from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts rename to arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi index fa9ccbbcc97fda4e1f3ddad5f57ca497ea15b78f..7c389ff3762c99e7df73dea0b0f958f49f2617fb 100644 --- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts +++ b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi @@ -11,13 +11,22 @@ * GNU General Public License for more details. */ -/dts-v1/; +#include "sdm429w-pm660.dtsi" -#include "msm8953.dtsi" -#include "msm8953-mtp.dtsi" +&pm660_misc { + qcom,support-twm-config; +}; + +&pm660_pbs { + status = "okay"; +}; -/ { - model = "Qualcomm Technologies, Inc. 
msm8953 + NO PMI SOC"; - compatible = "qcom,msm8953"; - qcom,pmic-id = <0x010016 0x0 0x0 0x0>; +&pm660_pon { + qcom,support-twm-config; + qcom,pbs-client = <&pm660_pbs>; }; + +&pm660_fg { + qcom,fg-disable-in-twm; +}; + diff --git a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi index f6d85f423005dc29e4a53a17e749ab55c6149237..0dd603531b75ee59131371fd069d21c53e1dcd90 100644 --- a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi @@ -371,9 +371,10 @@ /* over-write the PM660 GPIO mappings for 429w */ &pm660_gpios { interrupts = <0x0 0xc3 0 IRQ_TYPE_NONE>, + <0x0 0xc4 0 IRQ_TYPE_NONE>, <0x0 0xcb 0 IRQ_TYPE_NONE>; - interrupt-names = "pm660_gpio4", "pm660_gpio12"; - qcom,gpios-disallowed = <1 2 3 5 6 7 8 9 10 11 13>; + interrupt-names = "pm660_gpio4", "pm660_gpio5", "pm660_gpio12"; + qcom,gpios-disallowed = <1 2 3 6 7 8 9 10 11 13>; }; &pm660_vadc { @@ -761,4 +762,3 @@ /delete-property/ vddio-supply; vddio-supply = <&L13A>; }; - diff --git a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi index db76a2483f21280a35a202bb443349b3b6d52fa6..6e510138fe717b89ee0d5afe62370986d46ccf26 100644 --- a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi @@ -265,6 +265,7 @@ regulator-min-microvolt = <1600000>; regulator-max-microvolt = <2040000>; qcom,init-voltage = <1600000>; + regulator-system-load = <20000>; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts deleted file mode 100644 index cd78c2c5a4f2b109112ef1d20d75f5a4b618832a..0000000000000000000000000000000000000000 --- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; -/plugin/; - -/ { - model = " SDM450 NOPMI MTP"; - qcom,board-id = <8 3>; -}; - diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts deleted file mode 100644 index 71a9eeffecb3a211a92d629e9dfd200a72ba89d9..0000000000000000000000000000000000000000 --- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "sdm450.dtsi" -#include "msm8953-mtp.dtsi" -/ { - model = "Qualcomm Technologies, Inc. 
SDM450 + NO PMI MTP S3"; - compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp"; - qcom,board-id = <8 3>; - qcom,pmic-id = <0x010016 0x0 0x0 0x0>; -}; - -&eeprom0 { - cam_vdig-supply = <&pm8953_l23>; -}; - -&camera0 { - cam_vdig-supply = <&pm8953_l23>; -}; - -&pm8953_gpios { - bklt_en { - bklt_en_default: bklt_en_default { - pins = "gpio4"; - function = "normal"; - power-source = <0>; - output-high; - }; - }; -}; - -&pm8953_pwm { - status = "ok"; -}; - -&mdss_dsi0 { - qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>; - pinctrl-names = "mdss_default", "mdss_sleep"; - pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>; - pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; - qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>; - -}; - -&dsi_truly_1080_vid { - qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; - qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>; - qcom,mdss-dsi-bl-pmic-bank-select = <0>; - qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>; -}; - -&dsi_hx8399c_truly_vid { - qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; - qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>; - qcom,mdss-dsi-bl-pmic-bank-select = <0>; - qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts deleted file mode 100644 index afb35543c23f3ab2198ee16412127777330832e8..0000000000000000000000000000000000000000 --- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "sdm450.dtsi" -#include "msm8953-mtp.dtsi" -/ { - model = "Qualcomm Technologies, Inc. 
SDM450 + NO PMI MTP S3"; - compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp"; - qcom,pmic-id = <0x010016 0x0 0x0 0x0>; -}; - -&eeprom0 { - cam_vdig-supply = <&pm8953_l23>; -}; - -&camera0 { - cam_vdig-supply = <&pm8953_l23>; -}; - -&pm8953_gpios { - bklt_en { - bklt_en_default: bklt_en_default { - pins = "gpio4"; - function = "normal"; - power-source = <0>; - output-high; - }; - }; -}; - -&pm8953_pwm { - status = "ok"; -}; - -&mdss_dsi0 { - qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>; - pinctrl-names = "mdss_default", "mdss_sleep"; - pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>; - pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; - qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>; - -}; - -&dsi_truly_1080_vid { - qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; - qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>; - qcom,mdss-dsi-bl-pmic-bank-select = <0>; - qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>; -}; - -&dsi_hx8399c_truly_vid { - qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; - qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>; - qcom,mdss-dsi-bl-pmic-bank-select = <0>; - qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>; -}; diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts index cb01d3917aac165f1f96c29cf1de71ff03b5d2fe..4e9a21710191eb4fae946ca8e057384c9a0bf93f 100644 --- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts +++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts @@ -19,6 +19,6 @@ / { model = "Qualcomm Technologies, Inc. SDM450 + PMI632 SOC"; compatible = "qcom,sdm450"; - qcom,pmic-id = <0x16 0x25 0x0 0x0>; + qcom,pmic-id = <0x010016 0x25 0x0 0x0>; qcom,pmic-name = "PMI632"; }; diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi index 12598939503c471ffd1c12fea490be0238a0f6c0..40406230b5bf0b41d3b050fdb1f6e7bd1a9eea7e 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi @@ -80,7 +80,7 @@ <&apps_smmu 0x10a0 0x8>, <&apps_smmu 0x10b0 0x0>; buffer-types = <0xfff>; - virtual-addr-pool = <0x79000000 0x87000000>; + virtual-addr-pool = <0x79000000 0x67000000>; }; secure_bitstream_cb { @@ -180,7 +180,7 @@ <&apps_smmu 0x10a0 0x8>, <&apps_smmu 0x10b0 0x0>; buffer-types = <0xfff>; - virtual-addr-pool = <0x79000000 0x87000000>; + virtual-addr-pool = <0x79000000 0x67000000>; }; secure_bitstream_cb { diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index 3c1e253d19a8a3fad2bbb88aa3fad6db869111f5..897c9863f49a3502c73e8b377bfd3664a4824a70 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -31,6 +31,7 @@ CONFIG_SGETMASK_SYSCALL=y # CONFIG_SYSFS_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set @@ -190,6 +191,7 @@ CONFIG_NET_EMATCH_U32=y CONFIG_NET_CLS_ACT=y CONFIG_VSOCKETS=y CONFIG_VIRTIO_VSOCKETS=y +CONFIG_BPF_JIT=y CONFIG_CFG80211=y # CONFIG_CFG80211_DEFAULT_PS is not set CONFIG_MAC80211=y @@ -212,6 +214,7 @@ CONFIG_SCSI_VIRTIO=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y diff --git a/arch/arm64/configs/qcs605-perf_defconfig b/arch/arm64/configs/qcs605-perf_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..775ac0ccc160ce0679e0368cf1429bbb82fa71c5 --- /dev/null +++ 
b/arch/arm64/configs/qcs605-perf_defconfig @@ -0,0 +1,658 @@ +CONFIG_LOCALVERSION="-perf" +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHEDTUNE=y +CONFIG_BLK_CGROUP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SDM670=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CMA=y +CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_PROCESS_RECLAIM=y +CONFIG_SECCOMP=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_PSCI_BP_HARDENING=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +# CONFIG_EFI is not set +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y 
+CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_IPC_ROUTER=y +CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_AQT_REGMAP=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y 
+CONFIG_QPNP_MISC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_REQ_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_SKY2=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_LAN78XX=y +CONFIG_USB_USBNET=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_DIAG_CHAR=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_PINCTRL_SDM670=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_NX30P6093=y +CONFIG_QPNP_FG_GEN3=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB2=y +CONFIG_QPNP_QNOVO=y +CONFIG_SMB1390_CHARGE_PUMP=y +CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_MSM_BCL_PERIPHERAL_CTL=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_QPNP_OLEDB=y +CONFIG_REGULATOR_QPNP=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_SW=y +CONFIG_QCOM_KGSL=y +CONFIG_DRM=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_DRM_LT_LT9611=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y 
+CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_SONY=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_USB_LINK_LAYER_TEST=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_UVC=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y +CONFIG_MMC_CQ_HCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_QPNP=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_WLED=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_QPNP_COINCELL=y +CONFIG_QPNP_REVID=y +CONFIG_USB_BAM=y +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y +CONFIG_QCOM_GENI_SE=y +CONFIG_MSM_GCC_SDM845=y +CONFIG_MSM_VIDEOCC_SDM845=y +CONFIG_MSM_CAMCC_SDM845=y +CONFIG_MSM_DISPCC_SDM845=y +CONFIG_CLOCK_QPNP_DIV=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_GPUCC_SDM845=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_REMOTE_SPINLOCK_MSM=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SDM670_LLCC=y +CONFIG_QCOM_QCS605_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QPNP_PBS=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_MINIDUMP=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_MSM_SMEM=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_LOOPBACK_SERVER=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y 
+CONFIG_MSM_GLINK_SPI_XPRT=y +CONFIG_MSM_SPCOM=y +CONFIG_MSM_SPSS_UTILS=y +CONFIG_TRACER_PKT=y +CONFIG_QTI_RPMH_API=y +CONFIG_MSM_SMP2P=y +CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y +CONFIG_MSM_QMI_INTERFACE=y +CONFIG_MSM_GLINK_PKT=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_GLINK_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_ICNSS=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_MSM_QBT1000=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_REMOTEQDSS=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_RRADC=y +CONFIG_PWM=y +CONFIG_PWM_QPNP=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM=y +# CONFIG_NVMEM_SYSFS is not set +CONFIG_QCOM_QFPROM=y +CONFIG_SENSORS_SSC=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_IPC_LOGGING=y +CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_CRC32_ARM64=y +CONFIG_QMI_ENCDEC=y diff --git a/arch/arm64/configs/qcs605_defconfig b/arch/arm64/configs/qcs605_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..3b7319cb1d8aee2bae5e2ce3256876330318fa56 --- /dev/null +++ b/arch/arm64/configs/qcs605_defconfig @@ -0,0 +1,721 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y 
+CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHEDTUNE=y +CONFIG_BLK_CGROUP=y +CONFIG_DEBUG_BLK_CGROUP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SDM670=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_PROCESS_RECLAIM=y +CONFIG_SECCOMP=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_PSCI_BP_HARDENING=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y 
+CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_DNS_RESOLVER=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_IPC_ROUTER=y +CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_AQT_REGMAP=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_QPNP_MISC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y 
+CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_REQ_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_LAN78XX=y +CONFIG_USB_USBNET=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SERIAL_MSM_GENI_CONSOLE=y +CONFIG_DIAG_CHAR=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_PINCTRL_SDM670=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_NX30P6093=y +CONFIG_QPNP_FG_GEN3=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB2=y +CONFIG_QPNP_QNOVO=y +CONFIG_SMB1390_CHARGE_PUMP=y +CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_MSM_BCL_PERIPHERAL_CTL=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_QPNP_OLEDB=y +CONFIG_REGULATOR_QPNP=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_SW=y +CONFIG_QCOM_KGSL=y +CONFIG_DRM=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_DRM_LT_LT9611=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not 
set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_HID_SONY=y +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_USB_LINK_LAYER_TEST=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_UVC=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_SDHCI_MSM_ICE=y +CONFIG_MMC_CQ_HCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_QPNP=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_WLED=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_EDAC=y +CONFIG_EDAC_MM_EDAC=y +CONFIG_EDAC_KRYO3XX_ARM64=y +CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_QPNP_COINCELL=y +CONFIG_QPNP_REVID=y +CONFIG_USB_BAM=y +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y +CONFIG_QCOM_GENI_SE=y +CONFIG_MSM_GCC_SDM845=y +CONFIG_MSM_VIDEOCC_SDM845=y +CONFIG_MSM_CAMCC_SDM845=y +CONFIG_MSM_DISPCC_SDM845=y +CONFIG_CLOCK_QPNP_DIV=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_GPUCC_SDM845=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_REMOTE_SPINLOCK_MSM=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SDM670_LLCC=y +CONFIG_QCOM_QCS605_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_MSM_GLADIATOR_HANG_DETECT=y +CONFIG_MSM_GLADIATOR_ERP=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QPNP_PBS=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_MINIDUMP=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_SECURE_BUFFER=y 
+CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_MSM_SMEM=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_LOOPBACK_SERVER=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y +CONFIG_MSM_GLINK_SPI_XPRT=y +CONFIG_MSM_SPCOM=y +CONFIG_MSM_SPSS_UTILS=y +CONFIG_TRACER_PKT=y +CONFIG_QTI_RPMH_API=y +CONFIG_MSM_SMP2P=y +CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y +CONFIG_MSM_QMI_INTERFACE=y +CONFIG_MSM_GLINK_PKT=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_GLINK_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_MSM_QBT1000=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_REMOTEQDSS=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_RRADC=y +CONFIG_PWM=y +CONFIG_PWM_QPNP=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_PHY_XGENE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM=y +# CONFIG_NVMEM_SYSFS is not set +CONFIG_QCOM_QFPROM=y +CONFIG_SENSORS_SSC=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EFIVAR_FS=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_MODULE_LOAD_INFO=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_PREEMPTIRQ_EVENTS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_LKDTM=y +CONFIG_MEMTEST=y +CONFIG_PANIC_ON_DATA_CORRUPTION=y +CONFIG_ARM64_PTDUMP=y +CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_CTI=y 
+CONFIG_CORESIGHT_EVENT=y +CONFIG_CORESIGHT_TGU=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_CRC32_ARM64=y +CONFIG_XZ_DEC=y +CONFIG_QMI_ENCDEC=y diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 0f2e1ab5e16669d3cc7dd27170243f5f79424a6b..9b2e2e2e728ae7abaf7252ea02337654549751e2 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -73,7 +73,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") #undef __XCHG_CASE #define __XCHG_GEN(sfx) \ -static inline unsigned long __xchg##sfx(unsigned long x, \ +static __always_inline unsigned long __xchg##sfx(unsigned long x, \ volatile void *ptr, \ int size) \ { \ @@ -115,7 +115,7 @@ __XCHG_GEN(_mb) #define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) #define __CMPXCHG_GEN(sfx) \ -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ unsigned long new, \ int size) \ @@ -248,7 +248,7 @@ __CMPWAIT_CASE( , , 8); #undef __CMPWAIT_CASE #define __CMPWAIT_GEN(sfx) \ -static inline void __cmpwait##sfx(volatile void *ptr, \ +static __always_inline void __cmpwait##sfx(volatile void *ptr, \ unsigned long val, \ int size) \ { \ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index bfaead19708343fb056ea18a4f56b90408199841..e5733cbd0c84906e285bbcf21a54763f264285b0 100755 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -805,7 +805,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) handler[reason], smp_processor_id(), esr, esr_get_class_string(esr)); - die("Oops - bad mode", regs, 0); local_irq_disable(); panic("bad mode"); } diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index b581e16320dd8adfc638b05766a5af5a91e0d53d..296afafde120bca3100e39bbcf365fee7ddf8bd6 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -57,5 +57,7 @@ ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 9: mov x0, x2 // return the original size +ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ + CONFIG_ARM64_PAN) ret .previous diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index c7a7d9689e8f231a0ed672b3b32c0b6c85bacdc8..d0c356b1029125303cd7be6d434dcab4fd4bd5e9 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -75,5 +75,7 @@ ENDPROC(__arch_copy_from_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied +ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ + CONFIG_ARM64_PAN) ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 800779eb3079dfafea890f272f64492e801c8986..026294edcd27ea302864204cbe659b9d629295da 100644 --- a/arch/arm64/lib/copy_in_user.S +++ 
b/arch/arm64/lib/copy_in_user.S @@ -76,5 +76,7 @@ ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied +ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ + CONFIG_ARM64_PAN) ret .previous diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index f6cfcc0441de64e2e159557e1f3e3594bd011f3f..2d07da43e2d2b6af32ed9f46d6de71e64b5d96e2 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -74,5 +74,7 @@ ENDPROC(__arch_copy_to_user) .section .fixup,"ax" .align 2 9998: sub x0, end, dst // bytes not copied +ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \ + CONFIG_ARM64_PAN) ret .previous diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 4b32168cf91a0e3b99e1d899960d47fddf38ba06..b1e42bad69ac376d04865aa5e9e101f44a20e6ed 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -424,7 +424,7 @@ static int __init dummy_numa_init(void) if (numa_off) pr_info("NUMA disabled\n"); /* Forced off on command line. */ pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", - 0LLU, PFN_PHYS(max_pfn) - 1); + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); for_each_memblock(memory, mblk) { ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index c5bad6d9f7df1ae36e83bf8693c4e5ef49894a5e..b41d49046dc20cb6891c91c35b3463e1e349e815 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -316,6 +316,15 @@ skip_pgd: msr sctlr_el1, x18 isb + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. + */ + ic iallu + dsb nsh + isb + /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index d1d945c6bd05f05d567be65357210210bfaae48e..9fe114620b9d670667157525d76e3b73bd8660f3 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -912,8 +912,12 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo void module_arch_cleanup (struct module *mod) { - if (mod->arch.init_unw_table) + if (mod->arch.init_unw_table) { unw_remove_unwind_table(mod->arch.init_unw_table); - if (mod->arch.core_unw_table) + mod->arch.init_unw_table = NULL; + } + if (mod->arch.core_unw_table) { unw_remove_unwind_table(mod->arch.core_unw_table); + mod->arch.core_unw_table = NULL; + } } diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c index b3536a82a26202b65d81b4c9d2cffc068c3a22c9..e002084af10123f550670407e78960a71350386b 100644 --- a/arch/m68k/kernel/uboot.c +++ b/arch/m68k/kernel/uboot.c @@ -103,5 +103,5 @@ __init void process_uboot_commandline(char *commandp, int size) } parse_uboot_commandline(commandp, len); - commandp[size - 1] = 0; + commandp[len - 1] = 0; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 6cd230434f3264aa4039efeba369b5dd5e9ca2da..92bcde046b6b462cd45542fc356ede3a93281d02 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -792,7 +792,6 @@ config SIBYTE_SWARM select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT - select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_LITTLESUR bool "Sibyte BCM91250C2-LittleSur" @@ -815,7 +814,6 @@ config SIBYTE_SENTOSA select SYS_HAS_CPU_SB1 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN - select SWIOTLB if 
ARCH_DMA_ADDR_T_64BIT && PCI config SIBYTE_BIGSUR bool "Sibyte BCM91480B-BigSur" @@ -829,7 +827,6 @@ config SIBYTE_BIGSUR select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN select ZONE_DMA32 if 64BIT - select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI config SNI_RM bool "SNI RM200/300/400" diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c index e81ce4623070ea1ea36d926b16018a0ed1b2c661..06fb94370c7c9c554f0cd9a66b2700f753c08488 100644 --- a/arch/mips/bcm47xx/workarounds.c +++ b/arch/mips/bcm47xx/workarounds.c @@ -4,9 +4,8 @@ #include #include -static void __init bcm47xx_workarounds_netgear_wnr3500l(void) +static void __init bcm47xx_workarounds_enable_usb_power(int usb_power) { - const int usb_power = 12; int err; err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power"); @@ -22,7 +21,10 @@ void __init bcm47xx_workarounds(void) switch (board) { case BCM47XX_BOARD_NETGEAR_WNR3500L: - bcm47xx_workarounds_netgear_wnr3500l(); + bcm47xx_workarounds_enable_usb_power(12); + break; + case BCM47XX_BOARD_NETGEAR_WNDR3400_V3: + bcm47xx_workarounds_enable_usb_power(21); break; default: /* No workaround(s) needed */ diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index 7019e2967009e98e6f6191c1e86969d4663a957f..bbbf8057565b236e69561439f8326106204ff82b 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -84,7 +84,7 @@ void __init prom_init(void) * Here we will start up CPU1 in the background and ask it to * reconfigure itself then go back to sleep. */ - memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20); + memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20); __sync(); set_c0_cause(C_SW0); cpumask_set_cpu(1, &bmips_booted_mask); diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index d1fe51edf5e6d005a6c0b202256c3d992d47e814..4d411da2497b3d8622065db8eac8e0a8e0f35627 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -119,7 +119,7 @@ #define BCM6368_RESET_DSL 0 #define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK #define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK -#define BCM6368_RESET_ENETSW 0 +#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK #define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK #define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK #define BCM6368_RESET_PCIE 0 diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi index cf47ed4d85694fd60d4d4c0e976d39c2c4864d17..1fda24fc186063a67c3c89a8466d3694825e681e 100644 --- a/arch/mips/boot/dts/qca/ar9331.dtsi +++ b/arch/mips/boot/dts/qca/ar9331.dtsi @@ -98,7 +98,7 @@ miscintc: interrupt-controller@18060010 { compatible = "qca,ar7240-misc-intc"; - reg = <0x18060010 0x4>; + reg = <0x18060010 0x8>; interrupt-parent = <&cpuintc>; interrupts = <6>; diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index f3f60056bc274915fdeaf47f03a8a1b1eb8c0166..fb5651b99ab2a7739a980fffe02d4bfaf864845a 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -637,7 +637,6 @@ CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_ADUTUX=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index c2b4e3f33a7382c84316c45f9fa31e72d462af06..4f6b45f64c2f6f6a58a8c603c1a8e0e026ab6e8b 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -350,7 +350,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y 
CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_XIRCOM=m CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_RIO500=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m CONFIG_USB_LED=m diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c index 6aa264b9856ac99b71b88d54fae19cea27d782de..7c6151d412bd7708a36d4a14cdff36498846f6b3 100644 --- a/arch/mips/fw/sni/sniprom.c +++ b/arch/mips/fw/sni/sniprom.c @@ -42,7 +42,7 @@ /* O32 stack has to be 8-byte aligned. */ static u64 o32_stk[4096]; -#define O32_STK &o32_stk[sizeof(o32_stk)] +#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)]) #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \ __asm__(#fun " = call_o32") diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index a92aee7b977acb56539f74f7f9db2d9d0c1291a5..23f55af7d6bad4c9de44a7faa96ab22af096e405 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void) #endif } -extern char bmips_reset_nmi_vec; -extern char bmips_reset_nmi_vec_end; -extern char bmips_smp_movevec; -extern char bmips_smp_int_vec; -extern char bmips_smp_int_vec_end; +extern char bmips_reset_nmi_vec[]; +extern char bmips_reset_nmi_vec_end[]; +extern char bmips_smp_movevec[]; +extern char bmips_smp_int_vec[]; +extern char bmips_smp_int_vec_end[]; extern int bmips_smp_enabled; extern int bmips_cpu_offset; diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h index 493a3cc7c39ad5a412d6d460061b740260dfff9c..cfdbe66575f4d82012d6350bfad2a1c624639e71 100644 --- a/arch/mips/include/asm/kexec.h +++ b/arch/mips/include/asm/kexec.h @@ -12,11 +12,11 @@ #include /* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) /* Maximum address we can use for the control code buffer */ -#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) /* Reserve 3*4096 bytes for board-specific info */ #define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096) diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 060f23ff181718fb6b9dec3f3a206f1013452956..258158c34df188f4c0b7f6e3d759b47ce587915a 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; extern cpumask_t cpu_foreign_map[]; -#define raw_smp_processor_id() (current_thread_info()->cpu) +static inline int raw_smp_processor_id(void) +{ +#if defined(__VDSO__) + extern int vdso_smp_processor_id(void) + __compiletime_error("VDSO should not call smp_processor_id()"); + return vdso_smp_processor_id(); +#else + return current_thread_info()->cpu; +#endif +} +#define raw_smp_processor_id raw_smp_processor_id /* Map from cpu id to sequential logical cpu number. This will only not be idempotent when cpus failed to come on-line. 
*/ diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index d4a293b68249b3cfefe4466b606e3c7847980e89..416d53f587e7c076d79c8551ab19d1ef86623a93 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -453,10 +453,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end) static inline void bmips_nmi_handler_setup(void) { - bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, - &bmips_reset_nmi_vec_end); - bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, - &bmips_smp_int_vec_end); + bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec, + bmips_reset_nmi_vec_end); + bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec, + bmips_smp_int_vec_end); } struct reset_vec_info { diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88665febfcad2964690beff5000a0d..12abf14aed4a37569ff19044a74bf1b854205d97 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -43,6 +43,10 @@ else $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) endif +# Some -march= flags enable MMI instructions, and GCC complains about that +# support being enabled alongside -msoft-float. Thus explicitly disable MMI. +cflags-y += $(call cc-option,-mno-loongson-mmi) + # # Loongson Machines' Support # diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c index ffefc1cb26121e7b653b64b661b267cac4b4e1cd..98c3a7feb10f8b2391c968661e5e76c8d6c2770e 100644 --- a/arch/mips/loongson64/common/serial.c +++ b/arch/mips/loongson64/common/serial.c @@ -110,7 +110,7 @@ static int __init serial_init(void) } module_init(serial_init); -static void __init serial_exit(void) +static void __exit serial_exit(void) { platform_device_unregister(&uart8250_device); } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 3cc5b2e4263c0def6d9524fc7dca238ada8ac4e3..f625fd20b21e60de9460e194ebba368548158cc2 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, return; } - if (cpu_has_rixi && _PAGE_NO_EXEC) { + if (cpu_has_rixi && !!_PAGE_NO_EXEC) { if (fill_includes_sw_bits) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { @@ -661,6 +661,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, int restore_scratch) { if (restore_scratch) { + /* + * Ensure the MFC0 below observes the value written to the + * KScratch register by the prior MTC0. + */ + if (scratch_reg >= 0) + uasm_i_ehb(p); + /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); @@ -675,12 +682,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r, uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_il_b(p, r, lid); } - if (scratch_reg >= 0) { - uasm_i_ehb(p); + if (scratch_reg >= 0) UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); - } else { + else UASM_i_LW(p, 1, scratchpad_offset(0), 0); - } } else { /* Reset default page size */ if (PM_DEFAULT_MASK >> 16) { @@ -922,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, } if (mode != not_refill && check_for_high_segbits) { uasm_l_large_segbits_fault(l, *p); + + if (mode == refill_scratch && scratch_reg >= 0) + uasm_i_ehb(p); + /* * We get here if we are an xsseg address, or if we are * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary. 
@@ -938,12 +947,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, uasm_i_jr(p, ptr); if (mode == refill_scratch) { - if (scratch_reg >= 0) { - uasm_i_ehb(p); + if (scratch_reg >= 0) UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); - } else { + else UASM_i_LW(p, 1, scratchpad_offset(0), 0); - } } else { uasm_i_nop(p); } diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index 3ef3fb65813697b6ac7bf4ef1239399180fbdffb..b3d6bf23a6620c5837f3ac9fcc2fb3d6357cfba3 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,5 +1,4 @@ obj-y := cfe.o -obj-$(CONFIG_SWIOTLB) += dma.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c deleted file mode 100644 index eb47a94f3583edfa47fb17a3090ad89065c5fa7f..0000000000000000000000000000000000000000 --- a/arch/mips/sibyte/common/dma.c +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * DMA support for Broadcom SiByte platforms. - * - * Copyright (c) 2018 Maciej W. Rozycki - */ - -#include -#include - -void __init plat_swiotlb_setup(void) -{ - swiotlb_init(1); -} diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index a1d98b5c8fd6757683d1e9c23196660676006a54..5c53b8aa43d26043c790adda09436af0f8e1fb09 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -959,12 +959,11 @@ void __init txx9_sramc_init(struct resource *r) goto exit_put; err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); if (err) { - device_unregister(&dev->dev); iounmap(dev->base); - kfree(dev); + device_unregister(&dev->dev); } return; exit_put: + iounmap(dev->base); put_device(&dev->dev); - return; } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 0b845cc7fbdc75af4de0a2f2c580d741ca754441..adfaee2dce34c94d277148c5e489a945b46d91c4 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -6,7 +6,10 @@ ccflags-vdso := \ $(filter -I%,$(KBUILD_CFLAGS)) \ $(filter -E%,$(KBUILD_CFLAGS)) \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ - $(filter -march=%,$(KBUILD_CFLAGS)) + $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ + $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \ + -D__VDSO__ cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 838d0259cd2715e034ce7787eba12ac4cc7ae9c4..3741f91fc186d076beb653efb6cf5c15a9246070 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -2,7 +2,7 @@ * arch/parisc/mm/ioremap.c * * (C) Copyright 1995 1996 Linus Torvalds - * (C) Copyright 2001-2006 Helge Deller + * (C) Copyright 2001-2019 Helge Deller * (C) Copyright 2005 Kyle McMartin */ @@ -83,7 +83,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l addr = (void __iomem *) area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, pgprot)) { - vfree(addr); + vunmap(addr); return NULL; } @@ -91,9 +91,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l } EXPORT_SYMBOL(__ioremap); -void iounmap(const volatile void __iomem *addr) +void iounmap(const volatile void __iomem *io_addr) { - if (addr > high_memory) - return 
vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); + unsigned long addr = (unsigned long)io_addr & PAGE_MASK; + + if (is_vmalloc_addr((void *)addr)) + vunmap((void *)addr); } EXPORT_SYMBOL(iounmap); diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index 7e3789ea396b88103644cf14726337fb98b00392..0b3db6322c793223ab53a351d023ce24fd8746c7 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -4,6 +4,8 @@ #include #include +#define INT_MAX ((int)(~0U>>1)) + #include "of.h" typedef u32 uint32_t; diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index f3daa175f86cb06b6d68650833fb9118d557e927..c06cfdf12c0be1681fa0a1122fc563bc353b8775 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -124,7 +124,10 @@ extern int __ucmpdi2(u64, u64); /* Patch sites */ extern s32 patch__call_flush_count_cache; extern s32 patch__flush_count_cache_return; +extern s32 patch__flush_link_stack_return; +extern s32 patch__call_kvm_flush_link_stack; extern long flush_count_cache; +extern long kvm_flush_link_stack; #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index f4c7467f74655458e3f3269f3aa9a630cf99f91e..b73ab8a7ebc3ff22d16305beb5aed29a419752a6 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, pagefault_enable(); - if (!ret) - *oval = oldval; + *oval = oldval; return ret; } diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 759597bf0fd867bd6d4c151acb8acce7f7f3ff6b..ccf44c135389a111023a244498f9fab60a8583f5 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature) // Software required to flush count cache on context switch #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull +// Software required to flush link stack on context switch +#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull + // Features enabled by default #define SEC_FTR_DEFAULT \ diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 1abd8dd77ec134ff3d238c28a2251fed3a1396ae..eee2131a97e61ad2067ac483505a22dc562e8ce6 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -370,7 +370,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) while (parent) { if (!(parent->type & EEH_PE_INVALID)) break; - parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); + parent->type &= ~EEH_PE_INVALID; parent = parent->parent; } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 390ebf4ef384aa6769c5465a4be945d3435cd30e..38f0a75014eb0eba97e0cb9f7bdd3ca01ab3d8bb 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -510,6 +510,7 @@ flush_count_cache: /* Save LR into r9 */ mflr r9 + // Flush the link stack .rept 64 bl .+4 .endr @@ -519,6 +520,11 @@ flush_count_cache: .balign 32 /* Restore LR */ 1: mtlr r9 + + // If we're just flushing the link stack, return here +3: nop + patch_site 3b patch__flush_link_stack_return + li r9,0x7fff mtctr r9 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 92474227262b499cd4682aa425aa5617c546ca64..0c8b966e807022639504dab55f921ce49f14fab7 
100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -467,6 +467,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early) RFI_TO_USER_OR_KERNEL 9: /* Deliver the machine check to host kernel in V mode. */ +BEGIN_FTR_SECTION + ld r10,ORIG_GPR3(r1) + mtspr SPRN_CFAR,r10 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) MACHINE_CHECK_HANDLER_WINDUP b machine_check_pSeries diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5f202a566ec5f0296a22a71ac238a63a6a90ba7f..9bfdd2510fd5e7d7167d601008cf164c2596cf8a 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -765,9 +765,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, vaddr = page_address(page) + offset; uaddr = (unsigned long)vaddr; - npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); if (tbl) { + npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); align = 0; if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 47c6c0401b3a2c48ef1b51f5d0b60a96db900ee4..54c95e7c74cce0cae2fe299ffc72726d24a5606f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -576,12 +576,11 @@ void flush_all_to_thread(struct task_struct *tsk) if (tsk->thread.regs) { preempt_disable(); BUG_ON(tsk != current); - save_all(tsk); - #ifdef CONFIG_SPE if (tsk->thread.regs->msr & MSR_SPE) tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); #endif + save_all(tsk); preempt_enable(); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 6a3e5de544ce2f2b6a3311338daafd19b442f6b5..641f3e4c3380891e834052004762b78db23d274e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -874,15 +874,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, return 0; for_each_cpu(cpu, cpus) { + struct device *dev = get_cpu_device(cpu); + switch (state) { case DOWN: - cpuret = cpu_down(cpu); + cpuret = device_offline(dev); break; case UP: - cpuret = cpu_up(cpu); + cpuret = device_online(dev); break; } - if (cpuret) { + if (cpuret < 0) { pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", __func__, ((state == UP) ? "up" : "down"), @@ -971,6 +973,8 @@ int rtas_ibm_suspend_me(u64 handle) data.token = rtas_token("ibm,suspend-me"); data.complete = &done; + lock_device_hotplug(); + /* All present CPUs must be online */ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); cpuret = rtas_online_cpus_mask(offline_mask); @@ -980,6 +984,7 @@ int rtas_ibm_suspend_me(u64 handle) goto out; } + cpu_hotplug_disable(); stop_topology_update(); /* Call function on all CPUs. 
One of us will make the @@ -994,6 +999,7 @@ int rtas_ibm_suspend_me(u64 handle) printk(KERN_ERR "Error doing global join\n"); start_topology_update(); + cpu_hotplug_enable(); /* Take down CPUs not online prior to suspend */ cpuret = rtas_offline_cpus_mask(offline_mask); @@ -1002,6 +1008,7 @@ int rtas_ibm_suspend_me(u64 handle) __func__); out: + unlock_device_hotplug(); free_cpumask_var(offline_mask); return atomic_read(&data.error); } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index f4a98d9c5913b40b96fc959dbacab13d5143f1b6..11fff9669cfdf1459047b7bc6834133dd3b370ab 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -25,11 +25,12 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_HW = 0x4, }; static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; +static bool link_stack_flush_enabled; bool barrier_nospec_enabled; static bool no_nospec; static bool btb_flush_enabled; -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) static bool no_spectrev2; #endif @@ -107,7 +108,7 @@ static __init int barrier_nospec_debugfs_init(void) device_initcall(barrier_nospec_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) static int __init handle_nospectre_v2(char *p) { no_spectrev2 = true; @@ -115,6 +116,9 @@ static int __init handle_nospectre_v2(char *p) return 0; } early_param("nospectre_v2", handle_nospectre_v2); +#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ + +#ifdef CONFIG_PPC_FSL_BOOK3E void setup_spectre_v2(void) { if (no_spectrev2) @@ -202,11 +206,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c if (ccd) seq_buf_printf(&s, "Indirect branch cache disabled"); + + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { seq_buf_printf(&s, "Mitigation: Software count cache flush"); if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) seq_buf_printf(&s, " (hardware accelerated)"); + + if (link_stack_flush_enabled) + seq_buf_printf(&s, ", Software link stack flush"); + } else if (btb_flush_enabled) { seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); } else { @@ -367,18 +379,49 @@ static __init int stf_barrier_debugfs_init(void) device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +static void no_count_cache_flush(void) +{ + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + pr_info("count-cache-flush: software flush disabled.\n"); +} + static void toggle_count_cache_flush(bool enable) { - if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) && + !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) + enable = false; + + if (!enable) { patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; - pr_info("count-cache-flush: software flush disabled.\n"); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); +#endif + pr_info("link-stack-flush: software flush disabled.\n"); + link_stack_flush_enabled = false; + no_count_cache_flush(); return; } + // This enables the branch from _switch to flush_count_cache patch_branch_site(&patch__call_flush_count_cache, (u64)&flush_count_cache, BRANCH_SET_LINK); +#ifdef 
CONFIG_KVM_BOOK3S_HV_POSSIBLE + // This enables the branch from guest_exit_cont to kvm_flush_link_stack + patch_branch_site(&patch__call_kvm_flush_link_stack, + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); +#endif + + pr_info("link-stack-flush: software flush enabled.\n"); + link_stack_flush_enabled = true; + + // If we just need to flush the link stack, patch an early return + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); + no_count_cache_flush(); + return; + } + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { count_cache_flush_type = COUNT_CACHE_FLUSH_SW; pr_info("count-cache-flush: full software flush sequence enabled.\n"); @@ -392,7 +435,26 @@ static void toggle_count_cache_flush(bool enable) void setup_count_cache_flush(void) { - toggle_count_cache_flush(true); + bool enable = true; + + if (no_spectrev2 || cpu_mitigations_off()) { + if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || + security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) + pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); + + enable = false; + } + + /* + * There's no firmware feature flag/hypervisor bit to tell us we need to + * flush the link stack on context switch. So we set it here if we see + * either of the Spectre v2 mitigations that aim to protect userspace. + */ + if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || + security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); + + toggle_count_cache_flush(enable); } #ifdef CONFIG_DEBUG_FS diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 3745113fcc652d8ca3e66692aaab7d87f7ea9338..2a7eb5452aba79fc4d2b2c709642b0e71cfc6130 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 6b2b69616e7762507f3375300513834f328bb503..7b341b86216c29b3c8fc909e68478a128866974e 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -139,6 +139,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index abf17feffe4048af18382a075b01553f7e9be00e..bf966869151169b6b101675b4822609375058019 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index 3820213248836474c3db105f0a4fe2d0fad3d0d7..09b2a49f6dd53f907248d27d65a077c8f1ccf2a1 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -124,6 +124,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 73c3c127d858443867003f04fd910a25f6658b1b..209cad89a11a5b9809d388017fb2a5a43294aa52 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c 
@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) { if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { ulong pc = kvmppc_get_pc(vcpu); + ulong lr = kvmppc_get_lr(vcpu); if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; } } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 79a180cf4c94257710646c59502674bee45087ab..4b60bec20603c732a968430d3a940a944159d84f 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -1266,6 +1267,10 @@ mc_cont: bl kvmhv_accumulate_time #endif + /* Possibly flush the link stack here. */ +1: nop + patch_site 1b patch__call_kvm_flush_link_stack + stw r12, STACK_SLOT_TRAP(r1) mr r3, r12 /* Increment exit count, poke other threads to exit */ @@ -1685,6 +1690,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mtlr r0 blr +.balign 32 +.global kvm_flush_link_stack +kvm_flush_link_stack: + /* Save LR into r0 */ + mflr r0 + + /* Flush the link stack. On Power8 it's up to 32 entries in size. */ + .rept 32 + bl .+4 + .endr + + /* And on Power9 it's up to 64. */ +BEGIN_FTR_SECTION + .rept 32 + bl .+4 + .endr +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + + /* Restore LR */ + mtlr r0 + blr + /* * Check whether an HDSI is an HPTE not found fault or something else. * If it is an HPTE not found fault that is due to the guest accessing diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 44c33ee397a02b22d3d8c6f414072512aa821054..2525f23da4be1160250745a2fd40580717bfa543 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -287,14 +287,6 @@ void __init radix__early_init_devtree(void) mmu_psize_defs[MMU_PAGE_64K].shift = 16; mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; found: -#ifdef CONFIG_SPARSEMEM_VMEMMAP - if (mmu_psize_defs[MMU_PAGE_2M].shift) { - /* - * map vmemmap using 2M if available - */ - mmu_vmemmap_psize = MMU_PAGE_2M; - } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ return; } @@ -337,7 +329,13 @@ void __init radix__early_init_mmu(void) #ifdef CONFIG_SPARSEMEM_VMEMMAP /* vmemmap mapping */ - mmu_vmemmap_psize = mmu_virtual_psize; + if (mmu_psize_defs[MMU_PAGE_2M].shift) { + /* + * map vmemmap using 2M if available + */ + mmu_vmemmap_psize = MMU_PAGE_2M; + } else + mmu_vmemmap_psize = mmu_virtual_psize; #endif /* * initialize page table size diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 64c9a91773af48449f3886dd0ab29df3f1b66f6e..96c41b55b106bbd4c3fc538c316f96e76480b464 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -321,7 +321,7 @@ void slb_initialize(void) #endif } - get_paca()->stab_rr = SLB_NUM_BOLTED; + get_paca()->stab_rr = SLB_NUM_BOLTED - 1; lflags = SLB_VSID_KERNEL | linear_llp; vflags = SLB_VSID_KERNEL | vmalloc_llp; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 7fb61ebc99a2d09dd8365c3e70dd64f7b3588024..c34a44e04c87974de8f4a1b092c26dde682a0d58 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -579,7 +579,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj, bin_attr->size); } -static BIN_ATTR_RO(symbol_map, 0); +static struct bin_attribute symbol_map_attr = { + .attr = {.name = "symbol_map", .mode = 
0400}, + .read = symbol_map_read +}; static void opal_export_symmap(void) { @@ -596,10 +599,10 @@ static void opal_export_symmap(void) return; /* Setup attributes */ - bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0])); - bin_attr_symbol_map.size = be64_to_cpu(syms[1]); + symbol_map_attr.private = __va(be64_to_cpu(syms[0])); + symbol_map_attr.size = be64_to_cpu(syms[1]); - rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map); + rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr); if (rc) pr_warn("Error %d creating OPAL symbols file\n", rc); } diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index 3db53e8aff9279cfe761ac9f43926559e347eaf4..9b2ef76578f06042e8d01dc4ffd3da5b4f221e12 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -664,7 +664,7 @@ static int update_flash_db(void) db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff); count = os_area_flash_write(db, sizeof(struct os_area_db), pos); - if (count < sizeof(struct os_area_db)) { + if (count < 0 || count < sizeof(struct os_area_db)) { pr_debug("%s: os_area_flash_write failed %zd\n", __func__, count); error = count < 0 ? count : -EIO; diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 39049e4884fbd30e8cf261f867da8309a98e9532..7a4d172c93765cdd3dfb59b4a06ef36db4bbf974 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -150,7 +150,7 @@ static int dtl_start(struct dtl *dtl) /* Register our dtl buffer with the hypervisor. The HV expects the * buffer size to be passed in the second word of the buffer */ - ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; + ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES); hwcpu = get_hard_smp_processor_id(dtl->cpu); addr = __pa(dtl->buf); @@ -185,7 +185,7 @@ static void dtl_stop(struct dtl *dtl) static u64 dtl_current_index(struct dtl *dtl) { - return lppaca_of(dtl->cpu).dtl_idx; + return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c0a0947f43bbb71cdc8f5c09e4255659c9149679..656bbbd731d03b7eddb7cb75285c0238661a7c5e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -616,7 +616,7 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) nid = memory_add_physaddr_to_nid(lmb->base_addr); /* Add the memory */ - rc = add_memory(nid, lmb->base_addr, block_sz); + rc = __add_memory(nid, lmb->base_addr, block_sz); if (rc) { dlpar_remove_device_tree_lmb(lmb); dlpar_release_drc(lmb->drc_index); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 3784a7abfcc8033a2193d07c5190d48a82fa61d4..74791e8382d22781c7baa48bea9852150f694a0a 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope) prop_data += vd; } + + cond_resched(); } + + cond_resched(); } while (rtas_rc == 1); of_node_put(dn); @@ -282,8 +287,12 @@ int pseries_devicetree_update(s32 scope) add_dt_node(phandle, drc_index); break; } + + cond_resched(); } } + + cond_resched(); } while (rc == 1); kfree(rtas_buf); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c 
index adb09ab87f7c0a52bc97e6200d4c6bab63d788c7..30782859d89805ac98d735648188ebfb755b943b 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -298,6 +298,9 @@ static void pseries_lpar_idle(void) * low power mode by ceding processor to hypervisor */ + if (!prep_irq_for_idle()) + return; + /* Indicate to hypervisor that we are idle. */ get_lppaca()->idle = 1; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 591cbdf615af046d4e0662241b03c3b113280333..1a906dd7ca7d9eab324464bb4173557c3fc60936 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -572,6 +572,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_encrypt(desc, dst, src, nbytes); @@ -586,6 +589,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_decrypt(desc, dst, src, nbytes); diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 2a17123130d304eb7316cfc9e07306316f8d7b2b..224aeda1e8ccfc0bf44da78296b8a0f1fcd12c5d 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -267,7 +267,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root) static int hypfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *root_inode; - struct dentry *root_dentry; + struct dentry *root_dentry, *update_file; int rc = 0; struct hypfs_sb_info *sbi; @@ -298,9 +298,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) rc = hypfs_diag_create_files(root_dentry); if (rc) return rc; - sbi->update_file = hypfs_create_update_file(root_dentry); - if (IS_ERR(sbi->update_file)) - return PTR_ERR(sbi->update_file); + update_file = hypfs_create_update_file(root_dentry); + if (IS_ERR(update_file)) + return PTR_ERR(update_file); + sbi->update_file = update_file; hypfs_update_update(sb); pr_info("Hypervisor filesystem mounted\n"); return 0; diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index a7ef702201260b4a39e8cff88076f4e433af0458..31b2913372b56efe2adf08fe6029bbf5f7e93da6 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -151,7 +151,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from, __rc; \ }) -static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) +static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { unsigned long spec = 0x810000UL; int rc; @@ -181,7 +181,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) return rc; } -static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) +static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { unsigned long spec = 0x81UL; int rc; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 96e4fcad57bf7008ea4932f46a3df06ee30e5d85..f46e5c0cb6d95d5b224ac940227d9b7fd4f00fa9 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1611,14 +1611,17 @@ static int __init init_cpum_sampling_pmu(void) } sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); - if (!sfdbg) + if (!sfdbg) 
{ pr_err("Registering for s390dbf failed\n"); + return -ENOMEM; + } debug_register_view(sfdbg, &debug_sprintf_view); err = register_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert); if (err) { pr_cpumsf_err(RS_INIT_FAILURE_ALRT); + debug_unregister(sfdbg); goto out; } @@ -1627,6 +1630,7 @@ static int __init init_cpum_sampling_pmu(void) pr_cpumsf_err(RS_INIT_FAILURE_PERF); unregister_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert); + debug_unregister(sfdbg); goto out; } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 239f29508f0bfacd579d449a7e738703d655b661..69ac47241b19c3160bd27c2ec5e1237fb4037e8b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -256,7 +256,8 @@ int arch_update_cpu_topology(void) topology_update_polarization_simple(); for_each_online_cpu(cpu) { dev = get_cpu_device(cpu); - kobject_uevent(&dev->kobj, KOBJ_CHANGE); + if (dev) + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } return rc; } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index ca7c3c34f94ba835a0a1467c568a69bc58654d95..2bb3a255e51a4f86cfe5ae3daa80fac1e785b42f 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -24,9 +24,10 @@ obj-y += vdso32_wrapper.o extra-y += vdso32.lds CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) -# Disable gcov profiling and ubsan for VDSO code +# Disable gcov profiling, ubsan and kasan for VDSO code GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n # Force dependency (incbin is bad) $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 84af2b6b64c4267a7a3e802e0c59bdfb0b8c6f58..76c56b5382be9bfe1744b5b61230b115c62e6c68 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -24,9 +24,10 @@ obj-y += vdso64_wrapper.o extra-y += vdso64.lds CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) -# Disable gcov profiling and ubsan for VDSO code +# Disable gcov profiling, ubsan and kasan for VDSO code GCOV_PROFILE := n UBSAN_SANITIZE := n +KASAN_SANITIZE := n # Force dependency (incbin is bad) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index be4db07f70d385af8f8b1230943eb8cbbd747d0a..95126d25aed5b27afabb54f3c13883ecf117be0f 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1652,6 +1652,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, case KVM_S390_MCHK: irq->u.mchk.mcic = s390int->parm64; break; + case KVM_S390_INT_PFAULT_INIT: + irq->u.ext.ext_params = s390int->parm; + irq->u.ext.ext_params2 = s390int->parm64; + break; + case KVM_S390_RESTART: + case KVM_S390_INT_CLOCK_COMP: + case KVM_S390_INT_CPU_TIMER: + break; + default: + return -EINVAL; } return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 07f5719006763265751b444829a40d9767d2389d..37c254677ccdaa56e18d329788b6c8464ac676f4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1422,13 +1422,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); if (!kvm->arch.sca) goto out_err; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); sca_offset += 16; if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE) sca_offset = 0; kvm->arch.sca = (struct bsca_block *) ((char *) kvm->arch.sca + sca_offset); - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); sprintf(debug_name, "kvm-%u", 
current->pid); @@ -3033,7 +3033,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION | KVM_S390_MEMOP_F_CHECK_ONLY; - if (mop->flags & ~supported_flags) + if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) return -EINVAL; if (mop->size > MEM_OP_MAX_SIZE) @@ -3105,7 +3105,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; - struct kvm_s390_irq s390irq; + struct kvm_s390_irq s390irq = {}; r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 79ddd580d60539dbcdb7114bf1523290fa300aa2..ca6fab51eea1bde9dd2e0aa10ba820e2469206cf 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -306,16 +306,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, } if (write) { - len = *lenp; - if (copy_from_user(buf, buffer, - len > sizeof(buf) ? sizeof(buf) : len)) + len = min(*lenp, sizeof(buf)); + if (copy_from_user(buf, buffer, len)) return -EFAULT; - buf[sizeof(buf) - 1] = '\0'; + buf[len - 1] = '\0'; cmm_skip_blanks(buf, &p); nr = simple_strtoul(p, &p, 0); cmm_skip_blanks(p, &p); seconds = simple_strtoul(p, &p, 0); cmm_set_timeout(nr, seconds); + *ppos += *lenp; } else { len = sprintf(buf, "%ld %ld\n", cmm_timeout_pages, cmm_timeout_seconds); @@ -323,9 +323,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; + *lenp = len; + *ppos += len; } - *lenp = len; - *ppos += len; return 0; } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 896344b6e0363357d9386b55557955c4c5782685..9b15a1dc662878b64d56d54cb4eb0de737711594 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -881,7 +881,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i break; case BPF_ALU64 | BPF_NEG: /* dst = -dst */ /* lcgr %dst,%dst */ - EMIT4(0xb9130000, dst_reg, dst_reg); + EMIT4(0xb9030000, dst_reg, dst_reg); break; /* * BPF_FROM_BE/LE @@ -1062,8 +1062,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* llgf %w1,map.max_entries(%b2) */ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); - /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ - EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, + /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0, 0xa); /* @@ -1089,8 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i * goto out; */ - /* sllg %r1,%b3,3: %r1 = index * 8 */ - EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); + /* llgfr %r1,%b3: %r1 = (u32) index */ + EMIT4(0xb9160000, REG_1, BPF_REG_3); + /* sllg %r1,%r1,3: %r1 *= 8 */ + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); /* lg %r1,prog(%b2,%r1) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h index faa2f61058c271abf9ce799a02c9a55678464533..92f0a46ace78ec2638338879283ad65523b74905 100644 --- a/arch/sparc/include/asm/cmpxchg_64.h +++ b/arch/sparc/include/asm/cmpxchg_64.h @@ -40,7 +40,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long return val; } -#define xchg(ptr,x) 
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ +({ __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) void __xchg_called_with_bad_pointer(void); diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index f005ccac91cc95cc4828f1fce8137c60212aefb0..e87c0f81b700e7fe0ce2c499de5cd3be10a1a060 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -20,6 +20,7 @@ */ #define HAS_DMA +#ifdef CONFIG_PARPORT_PC_FIFO static DEFINE_SPINLOCK(dma_spin_lock); #define claim_dma_lock() \ @@ -30,6 +31,7 @@ static DEFINE_SPINLOCK(dma_spin_lock); #define release_dma_lock(__flags) \ spin_unlock_irqrestore(&dma_spin_lock, __flags); +#endif static struct sparc_ebus_info { struct ebus_dma_info info; diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 62087028a9ce1e079d10d791117069a6a065093f..d2ad45c1011372dbac1c8e3a6baefa0bdf5bef9f 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -260,7 +260,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) if (err == 0) { spin_unlock(&line->lock); return IRQ_NONE; - } else if (err < 0) { + } else if ((err < 0) && (err != -EAGAIN)) { line->head = line->buffer; line->tail = line->buffer; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index af3eeb8cfbf94207ecb7072d25e6713643793bce..aa22b0d1609ed030d0fcf1616058ad5d91d6cb8d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1755,6 +1755,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS If unsure, say y. +choice + prompt "TSX enable mode" + depends on CPU_SUP_INTEL + default X86_INTEL_TSX_MODE_OFF + help + Intel's TSX (Transactional Synchronization Extensions) feature + allows optimizing locking protocols through lock elision, which + can lead to a noticeable performance boost. + + On the other hand it has been shown that TSX can be exploited + to mount side channel attacks (e.g. TAA), and chances are that + more such attacks will be discovered in the future. + + Therefore TSX is not enabled by default (aka tsx=off). An admin + might override this decision with the tsx=on command line parameter. + Even with TSX enabled, the kernel will attempt to enable the best + possible TAA mitigation setting depending on the microcode available + for the particular machine. + + This option allows setting the default TSX mode between tsx=on, =off + and =auto. See Documentation/kernel-parameters.txt for more + details. + + Say off if not sure, auto if TSX is in use but should only be enabled + on platforms known to be safe, or on if TSX is in use and the security + aspect of TSX is not relevant. + +config X86_INTEL_TSX_MODE_OFF + bool "off" + help + TSX is disabled if possible - equals the tsx=off command line parameter. + +config X86_INTEL_TSX_MODE_ON + bool "on" + help + TSX is always enabled on TSX capable HW - equals the tsx=on command + line parameter. + +config X86_INTEL_TSX_MODE_AUTO + bool "auto" + help + TSX is enabled on TSX capable HW that is believed to be safe against + side channel attacks - equals the tsx=auto command line parameter. +endchoice + config EFI bool "EFI runtime service support" depends on ACPI @@ -2569,8 +2614,7 @@ config OLPC config OLPC_XO1_PM bool "OLPC XO-1 Power Management" - depends on OLPC && MFD_CS5535 && PM_SLEEP - select MFD_CORE + depends on OLPC && MFD_CS5535=y && PM_SLEEP ---help--- Add support for poweroff and suspend of the OLPC XO-1 laptop.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f20c83a6393d732bf5189df02a30bb3469a9f2bf..47be4f5db0fbc7419fca49f893bf63ddce458337 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) export REALMODE_CFLAGS diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 054c5338770a8581e29907bdea32b26e794c0298..7f1277f371e76bacbdf41ef099804eb024343e39 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -30,6 +30,7 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y # CONFIG_PCSPKR_PLATFORM is not set CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y @@ -211,6 +212,7 @@ CONFIG_NET_EMATCH_U32=y CONFIG_NET_CLS_ACT=y CONFIG_VSOCKETS=y CONFIG_VIRTIO_VSOCKETS=y +CONFIG_BPF_JIT=y CONFIG_CFG80211=y CONFIG_MAC80211=y CONFIG_RFKILL=y @@ -236,6 +238,7 @@ CONFIG_SCSI_VIRTIO=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y CONFIG_DM_MIRROR=y CONFIG_DM_ZERO=y CONFIG_DM_UEVENT=y diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index fd4484ae3ffca3631e0f4475aa84149fe5fcb46f..5f72b473f3ed3f947de8c89a39ad8cc78f33b469 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -388,7 +388,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 config) { config &= ~perf_ibs->cnt_mask; - wrmsrl(hwc->config_base, config); + if (boot_cpu_data.x86 == 0x10) + wrmsrl(hwc->config_base, config); config &= ~perf_ibs->enable_mask; wrmsrl(hwc->config_base, config); } @@ -563,7 +564,8 @@ static struct perf_ibs perf_ibs_op = { }, .msr = MSR_AMD64_IBSOPCTL, .config_mask = IBS_OP_CONFIG_MASK, - .cnt_mask = IBS_OP_MAX_CNT, + .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT | + IBS_OP_CUR_CNT_RAND, .enable_mask = IBS_OP_ENABLE, .valid_mask = IBS_OP_VAL, .max_period = IBS_OP_MAX_CNT << 4, @@ -624,7 +626,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) if (event->attr.sample_type & PERF_SAMPLE_RAW) offset_max = perf_ibs->offset_max; else if (check_rip) - offset_max = 2; + offset_max = 3; else offset_max = 1; do { @@ -671,10 +673,17 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) throttle = perf_event_overflow(event, &data, ®s); out: - if (throttle) + if (throttle) { perf_ibs_stop(event, 0); - else - perf_ibs_enable_event(perf_ibs, hwc, period >> 4); + } else { + period >>= 4; + + if ((ibs_caps & IBS_CAPS_RDWROPCNT) && + (*config & IBS_OP_CNT_CTL)) + period |= *config & IBS_OP_CUR_CNT_RAND; + + perf_ibs_enable_event(perf_ibs, hwc, period); + } perf_event_update_userpage(event); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index e98e238d377505f64ad1c52a0033401413a262a0..55e362f9dbfaa9f83565294ec9067dddc068b34f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3075,6 +3075,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left) return left; } +static u64 nhm_limit_period(struct perf_event 
*event, u64 left) +{ + return max(left, 32ULL); +} + PMU_FORMAT_ATTR(event, "config:0-7" ); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); @@ -3734,6 +3739,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + x86_pmu.limit_period = nhm_limit_period; x86_pmu.cpu_events = nhm_events_attrs; diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 14635c5ea025138ba49cf5a4305c990c60f1ac6c..76a35c1213d24effa00bee5f94960eb847c28a4b 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -49,7 +49,7 @@ static __always_inline void atomic_add(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "addl %1,%0" : "+m" (v->counter) - : "ir" (i)); + : "ir" (i) : "memory"); } /** @@ -63,7 +63,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "subl %1,%0" : "+m" (v->counter) - : "ir" (i)); + : "ir" (i) : "memory"); } /** @@ -89,7 +89,7 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) static __always_inline void atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" - : "+m" (v->counter)); + : "+m" (v->counter) :: "memory"); } /** @@ -101,7 +101,7 @@ static __always_inline void atomic_inc(atomic_t *v) static __always_inline void atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" - : "+m" (v->counter)); + : "+m" (v->counter) :: "memory"); } /** diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 89ed2f6ae2f76accf15ad16810c6aceb4a1854ae..a3248402c36b9783ffc1c9d1a50f9675c4f94add 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -44,7 +44,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) - : "er" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter) : "memory"); } /** @@ -58,7 +58,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "subq %1,%0" : "=m" (v->counter) - : "er" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter) : "memory"); } /** @@ -85,7 +85,7 @@ static __always_inline void atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) - : "m" (v->counter)); + : "m" (v->counter) : "memory"); } /** @@ -98,7 +98,7 @@ static __always_inline void atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" : "=m" (v->counter) - : "m" (v->counter)); + : "m" (v->counter) : "memory"); } /** diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index eb53c2c78a1f971d6037b98923c56118c1a1a428..a0f450b21d6762ec7b626a2d57bac90d9d4af740 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -105,8 +105,8 @@ do { \ #endif /* Atomic operations are already serializing on x86 */ -#define __smp_mb__before_atomic() barrier() -#define __smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() do { } while (0) +#define __smp_mb__after_atomic() do { } while (0) #include diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 0232b5a2a2d9125cee6db2f5e0152628a8c0edf2..588d8fbd1e6dea159f9b32ba8f94e9835e321ede 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params) 
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), BOOT_PARAM_PRESERVE(hdr), + BOOT_PARAM_PRESERVE(e820_map), BOOT_PARAM_PRESERVE(eddbuf), }; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 3a972da155d6ab0fa1b1b3cf5fb21c28f06b3284..ccc4420f051b9b1edc75fbf7d29e96004ae1b00d 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -357,5 +357,7 @@ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ +#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ +#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index b3e32b010ab194ed613034234c403c4067502776..c2c01f84df75f1f9b35a3c898686a82973026d88 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +#define POP_SS_OPCODE 0x1f +#define MOV_SREG_OPCODE 0x8e + +/* + * Intel SDM Vol.3A 6.8.3 states; + * "Any single-step trap that would be delivered following the MOV to SS + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is + * suppressed." + * This function returns true if @insn is MOV SS or POP SS. On these + * instructions, single stepping is suppressed. + */ +static inline int insn_masking_exception(struct insn *insn) +{ + return insn->opcode.bytes[0] == POP_SS_OPCODE || + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); +} + #endif /* _ASM_X86_INSN_H */ diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index ba7b6f736414965011536ee4c240977d11a3989b..74ee597beb3e4e9469fc7ccf2948b8adf17f41a2 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -5,7 +5,7 @@ * "Big Core" Processors (Branded as Core, Xeon, etc...) * * The "_X" parts are generally the EP and EX Xeons, or the - * "Extreme" ones, like Broadwell-E. + * "Extreme" ones, like Broadwell-E, or Atom microserver. * * Things ending in "2" are usually because we have no better * name for them. There's no processor called "SILVERMONT2". 
@@ -67,6 +67,7 @@ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ +#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ /* Xeon Phi */ diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 282630e4c6ea4696e54c6ea650751d913ce49c11..1624a7ffa95d8928bdcdbe3c2b97cb20289b2da0 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -66,7 +66,7 @@ struct kimage; /* Memory to backup during crash kdump */ #define KEXEC_BACKUP_SRC_START (0UL) -#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */ +#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */ /* * CPU does not save ss and sp on stack if execution is already diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 222cb69e1219dfbc7a5cc0d6c1175233c938d11b..d2c14a96ec285a13d8b4b2619541395ce47bf929 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -261,6 +261,7 @@ struct kvm_rmap_head { struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; + struct list_head lpage_disallowed_link; /* * The following two entries are used to key the shadow page in the @@ -273,6 +274,7 @@ struct kvm_mmu_page { /* hold the gfn of each spte inside spt */ gfn_t *gfns; bool unsync; + bool lpage_disallowed; /* Can't be replaced by an equiv large page */ int root_count; /* Currently serving as active root */ unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ @@ -724,6 +726,7 @@ struct kvm_arch { */ struct list_head active_mmu_pages; struct list_head zapped_obsolete_pages; + struct list_head lpage_disallowed_mmu_pages; struct kvm_page_track_notifier_node mmu_sp_tracker; struct kvm_page_track_notifier_head track_notifier_head; @@ -798,6 +801,8 @@ struct kvm_arch { bool x2apic_format; bool x2apic_broadcast_quirk_disabled; + + struct task_struct *nx_lpage_recovery_thread; }; struct kvm_vm_stat { @@ -811,6 +816,7 @@ struct kvm_vm_stat { ulong mmu_unsync; ulong remote_tlb_flush; ulong lpages; + ulong nx_lpage_splits; }; struct kvm_vcpu_stat { diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 86166868db8c1794ef12a6e8c509dbcee9ad4112..8d162e0f288130fd1d0baab17c623193f990ea5a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -77,6 +77,18 @@ * Microarchitectural Data * Sampling (MDS) vulnerabilities. */ +#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /* + * The processor is not susceptible to a + * machine check error due to modifying the + * code page size along with either the + * physical address or cache type + * without TLB invalidation. + */ +#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */ +#define ARCH_CAP_TAA_NO BIT(8) /* + * Not susceptible to + * TSX Async Abort (TAA) vulnerabilities. 
+ */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -87,6 +99,10 @@ #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e +#define MSR_IA32_TSX_CTRL 0x00000122 +#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ +#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 0b40cc442bdac74831bdfd38cfadd56e8f5aa363..58b1b766e84e6fc0b76b04dfa767f0fb8908eb37 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -19,7 +19,7 @@ #define MWAIT_ECX_INTERRUPT_BREAK 0x1 #define MWAITX_ECX_TIMER_ENABLE BIT(1) #define MWAITX_MAX_LOOPS ((u32)-1) -#define MWAITX_DISABLE_CSTATES 0xf +#define MWAITX_DISABLE_CSTATES 0xf0 static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 10a48505abb5eb1e6ea10b05968884aa37011a73..8d56d701b5f7a9923b69404f57f39ff99162cd82 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -314,7 +314,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); #include /** - * mds_clear_cpu_buffers - Mitigation for MDS vulnerability + * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability * * This uses the otherwise unused and obsolete VERW instruction in * combination with microcode which triggers a CPU buffer flush when the @@ -337,7 +337,7 @@ static inline void mds_clear_cpu_buffers(void) } /** - * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability + * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability * * Clear CPU buffers if the corresponding static key is enabled */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index f353061bba1d0ff9f61f9f09ef6c150dae501266..81d5ea71bbe9462767cbd338c9e706f1ff793d95 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -200,16 +200,20 @@ struct x86_pmu_capability { #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) #define IBSCTL_LVT_OFFSET_MASK 0x0F -/* ibs fetch bits/masks */ +/* IBS fetch bits/masks */ #define IBS_FETCH_RAND_EN (1ULL<<57) #define IBS_FETCH_VAL (1ULL<<49) #define IBS_FETCH_ENABLE (1ULL<<48) #define IBS_FETCH_CNT 0xFFFF0000ULL #define IBS_FETCH_MAX_CNT 0x0000FFFFULL -/* ibs op bits/masks */ -/* lower 4 bits of the current count are ignored: */ -#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) +/* + * IBS op bits/masks + * The lower 7 bits of the current count are random bits + * preloaded by hardware and ignored in software + */ +#define IBS_OP_CUR_CNT (0xFFF80ULL<<32) +#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 155e49fc7010a2b98e2b234724d0035503a28881..92703fa09c195359168c7f6c28cb0aabc26032bf 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -880,4 +880,11 @@ enum mds_mitigations { MDS_MITIGATION_VMWERV, }; +enum taa_mitigations { + TAA_MITIGATION_OFF, + TAA_MITIGATION_UCODE_NEEDED, + TAA_MITIGATION_VERW, + TAA_MITIGATION_TSX_DISABLED, +}; + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 
ea78a8438a8af107ff67e107076e193e68011f22..fb489cd848faaa5d27139a79803c939a2fdf8920 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -199,24 +199,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs, (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); } +/** + * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns the address of the @n th entry of the + * kernel stack which is specified by @regs. If the @n th entry is NOT in + * the kernel stack, this returns NULL. + */ +static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return addr; + else + return NULL; +} + +/* To avoid include hell, we can't include uaccess.h */ +extern long probe_kernel_read(void *dst, const void *src, size_t size); + /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * is specified by @regs. If the @n th entry is NOT in the kernel stack * this returns 0. */ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { - unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); - addr += n; - if (regs_within_kernel_stack(regs, (unsigned long)addr)) - return *addr; - else - return 0; + unsigned long *addr; + unsigned long val; + long ret; + + addr = regs_get_kernel_stack_nth_addr(regs, n); + if (addr) { + ret = probe_kernel_read(&val, addr, sizeof(val)); + if (!ret) + return val; + } + return 0; } #define arch_has_single_step() (1) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index d25fb6beb2f0c5d8a3ed7894302535fff9b85275..dcaf7100b69c27a43dcae146c43cac9050860b3c 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -177,16 +177,6 @@ extern int safe_smp_processor_id(void); #endif #ifdef CONFIG_X86_LOCAL_APIC - -#ifndef CONFIG_X86_64 -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); -} - -#endif - extern int hard_smp_processor_id(void); #else /* CONFIG_X86_LOCAL_APIC */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 92639c9ee2bd99f777d93753203e36f4eb8b967b..d1ddcc021a8e1a971f845427b5e14904d9992c46 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -444,8 +444,10 @@ do { \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ + __typeof__(ptr) __gu_ptr = (ptr); \ + __typeof__(size) __gu_size = (size); \ __uaccess_begin_nospec(); \ - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ + __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __builtin_expect(__gu_err, 0); \ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 928ffdc21873c8e8d20ad73c0cfc0a55e4782bc1..722a76b88bcc07db864a21ca32a3696ddefcb61c 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1281,6 +1281,56 @@ 
static void lapic_setup_esr(void) oldvalue, value); } +static void apic_pending_intr_clear(void) +{ + long long max_loops = cpu_khz ? cpu_khz : 1000000; + unsigned long long tsc = 0, ntsc; + unsigned int value, queued; + int i, j, acked = 0; + + if (boot_cpu_has(X86_FEATURE_TSC)) + tsc = rdtsc(); + /* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now CPU has serviced that pending interrupt and + * it might not have done the ack_APIC_irq() because it thought, + * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it + * does not clear the ISR bit and cpu thinks it has already serivced + * the interrupt. Hence a vector might get locked. It was noticed + * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + */ + do { + queued = 0; + for (i = APIC_ISR_NR - 1; i >= 0; i--) + queued |= apic_read(APIC_IRR + i*0x10); + + for (i = APIC_ISR_NR - 1; i >= 0; i--) { + value = apic_read(APIC_ISR + i*0x10); + for (j = 31; j >= 0; j--) { + if (value & (1< 256) { + printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", + acked); + break; + } + if (queued) { + if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { + ntsc = rdtsc(); + max_loops = (cpu_khz << 10) - (ntsc - tsc); + } else + max_loops--; + } + } while (queued && max_loops > 0); + WARN_ON(max_loops <= 0); +} + /** * setup_local_APIC - setup the local APIC * @@ -1290,19 +1340,22 @@ static void lapic_setup_esr(void) void setup_local_APIC(void) { int cpu = smp_processor_id(); - unsigned int value, queued; - int i, j, acked = 0; - unsigned long long tsc = 0, ntsc; - long long max_loops = cpu_khz ? cpu_khz : 1000000; + unsigned int value; - if (boot_cpu_has(X86_FEATURE_TSC)) - tsc = rdtsc(); if (disable_apic) { disable_ioapic_support(); return; } + /* + * If this comes from kexec/kcrash the APIC might be enabled in + * SPIV. Soft disable it before doing further initialization. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && apic->disable_esr) { @@ -1328,16 +1381,21 @@ void setup_local_APIC(void) apic->init_apic_ldr(); #ifdef CONFIG_X86_32 - /* - * APIC LDR is initialized. If logical_apicid mapping was - * initialized during get_smp_config(), make sure it matches the - * actual value. - */ - i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); - WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); - /* always use the value from LDR */ - early_per_cpu(x86_cpu_to_logical_apicid, cpu) = - logical_smp_processor_id(); + if (apic->dest_logical) { + int logical_apicid, ldr_apicid; + + /* + * APIC LDR is initialized. If logical_apicid mapping was + * initialized during get_smp_config(), make sure it matches + * the actual value. + */ + logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); + if (logical_apicid != BAD_APICID) + WARN_ON(logical_apicid != ldr_apicid); + /* Always use the value from LDR. */ + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid; + } #endif /* @@ -1348,45 +1406,7 @@ void setup_local_APIC(void) value &= ~APIC_TPRI_MASK; apic_write(APIC_TASKPRI, value); - /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. 
- * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. - */ - do { - queued = 0; - for (i = APIC_ISR_NR - 1; i >= 0; i--) - queued |= apic_read(APIC_IRR + i*0x10); - - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1< 256) { - printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", - acked); - break; - } - if (queued) { - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { - ntsc = rdtsc(); - max_loops = (cpu_khz << 10) - (ntsc - tsc); - } else - max_loops--; - } - } while (queued && max_loops > 0); - WARN_ON(max_loops <= 0); + apic_pending_intr_clear(); /* * Now that we are all set up, enable the APIC diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d34629d70421f859d7e197a85744950bc576a906..09dd95cabfc285e3ae2d86647f6613b7d922fde7 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2346,7 +2346,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use * gsi_top if ioapic_dynirq_base hasn't been initialized yet. */ - return ioapic_initialized ? ioapic_dynirq_base : gsi_top; + if (!ioapic_initialized) + return gsi_top; + /* + * For DT enabled machines ioapic_dynirq_base is irrelevant and not + * updated. So simply return @from if ioapic_dynirq_base == 0. + */ + return ioapic_dynirq_base ? : from; } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 33b63670bf09e6d34c1f194cfe3600d9ea16dcb4..f6e386fe510c955a7e54ebc79b0234b8e3c9df1d 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -25,7 +25,7 @@ obj-y += bugs.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o -obj-$(CONFIG_CPU_SUP_INTEL) += intel.o +obj-$(CONFIG_CPU_SUP_INTEL) += intel.o tsx.o obj-$(CONFIG_CPU_SUP_AMD) += amd.o obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 2a42fef275ad3c16b56cb44c943696be57947863..24307d5bb4b84bd337e863f29ba0fd4374b9a399 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -31,11 +31,15 @@ #include #include +#include "cpu.h" + static void __init spectre_v1_select_mitigation(void); static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); +static void __init mds_print_mitigation(void); +static void __init taa_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ u64 x86_spec_ctrl_base; @@ -102,6 +106,13 @@ void __init check_bugs(void) ssb_select_mitigation(); l1tf_select_mitigation(); mds_select_mitigation(); + taa_select_mitigation(); + + /* + * As MDS and TAA mitigations are inter-related, print MDS + * mitigation until after TAA mitigation selection is done. 
+ */ + mds_print_mitigation(); arch_smt_update(); @@ -240,6 +251,12 @@ static void __init mds_select_mitigation(void) (mds_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); } +} + +static void __init mds_print_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) + return; pr_info("%s\n", mds_strings[mds_mitigation]); } @@ -265,6 +282,113 @@ static int __init mds_cmdline(char *str) } early_param("mds", mds_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "TAA: " fmt + +/* Default mitigation for TAA-affected CPUs */ +static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; +static bool taa_nosmt __ro_after_init; + +static const char * const taa_strings[] = { + [TAA_MITIGATION_OFF] = "Vulnerable", + [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", + [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", +}; + +static void __init taa_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_OFF; + return; + } + + /* TSX previously disabled by tsx=off */ + if (!boot_cpu_has(X86_FEATURE_RTM)) { + taa_mitigation = TAA_MITIGATION_TSX_DISABLED; + goto out; + } + + if (cpu_mitigations_off()) { + taa_mitigation = TAA_MITIGATION_OFF; + return; + } + + /* + * TAA mitigation via VERW is turned off if both + * tsx_async_abort=off and mds=off are specified. + */ + if (taa_mitigation == TAA_MITIGATION_OFF && + mds_mitigation == MDS_MITIGATION_OFF) + goto out; + + if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) + taa_mitigation = TAA_MITIGATION_VERW; + else + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + + /* + * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. + * A microcode update fixes this behavior to clear CPU buffers. It also + * adds support for MSR_IA32_TSX_CTRL which is enumerated by the + * ARCH_CAP_TSX_CTRL_MSR bit. + * + * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode + * update is required. + */ + ia32_cap = x86_read_arch_cap_msr(); + if ( (ia32_cap & ARCH_CAP_MDS_NO) && + !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + + /* + * TSX is enabled, select alternate mitigation for TAA which is + * the same as MDS. Enable MDS static branch to clear CPU buffers. + * + * For guests that can't determine whether the correct microcode is + * present on host, enable the mitigation for UCODE_NEEDED as well. + */ + static_branch_enable(&mds_user_clear); + + if (taa_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); + + /* + * Update MDS mitigation, if necessary, as the mds_user_clear is + * now enabled for TAA mitigation. 
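/*
 * Illustrative sketch (not from the patch itself): the MDS/TAA interdependency
 * that taa_select_mitigation() above implements, reduced to a standalone
 * predicate. It deliberately ignores the TSX-disabled and missing-microcode
 * cases; the struct and function names are invented for this sketch only.
 */
#include <stdbool.h>

struct mitig_cmdline {
        bool has_mds_bug;
        bool has_taa_bug;
        bool mds_off;           /* "mds=off" given */
        bool taa_off;           /* "tsx_async_abort=off" given */
};

static bool verw_buffer_clear_enabled(const struct mitig_cmdline *c)
{
        bool mds_wants = c->has_mds_bug && !c->mds_off;
        /* TAA keeps the VERW clearing on unless *both* switches are off. */
        bool taa_wants = c->has_taa_bug && !(c->taa_off && c->mds_off);

        return mds_wants || taa_wants;
}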
+ */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } +out: + pr_info("%s\n", taa_strings[taa_mitigation]); +} + +static int __init tsx_async_abort_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_TAA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + taa_mitigation = TAA_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + taa_mitigation = TAA_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_nosmt = true; + } + + return 0; +} +early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -780,13 +904,10 @@ static void update_mds_branch_idle(void) } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" +#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" void arch_smt_update(void) { - /* Enhanced IBRS implies STIBP. No update required. */ - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) - return; - mutex_lock(&spec_ctrl_mutex); switch (spectre_v2_user) { @@ -812,6 +933,17 @@ void arch_smt_update(void) break; } + switch (taa_mitigation) { + case TAA_MITIGATION_VERW: + case TAA_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(TAA_MSG_SMT); + break; + case TAA_MITIGATION_TSX_DISABLED: + case TAA_MITIGATION_OFF: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -1127,6 +1259,9 @@ void x86_spec_ctrl_setup_ap(void) x86_amd_ssb_disable(); } +bool itlb_multihit_kvm_mitigation; +EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); + #undef pr_fmt #define pr_fmt(fmt) "L1TF: " fmt @@ -1282,11 +1417,24 @@ static ssize_t l1tf_show_state(char *buf) l1tf_vmx_states[l1tf_vmx_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } + +static ssize_t itlb_multihit_show_state(char *buf) +{ + if (itlb_multihit_kvm_mitigation) + return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); + else + return sprintf(buf, "KVM: Vulnerable\n"); +} #else static ssize_t l1tf_show_state(char *buf) { return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); } + +static ssize_t itlb_multihit_show_state(char *buf) +{ + return sprintf(buf, "Processor vulnerable\n"); +} #endif static ssize_t mds_show_state(char *buf) @@ -1308,6 +1456,21 @@ static ssize_t mds_show_state(char *buf) sched_smt_active() ? "vulnerable" : "disabled"); } +static ssize_t tsx_async_abort_show_state(char *buf) +{ + if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || + (taa_mitigation == TAA_MITIGATION_OFF)) + return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sprintf(buf, "%s; SMT Host state unknown\n", + taa_strings[taa_mitigation]); + } + + return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); +} + static char *stibp_state(void) { if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) @@ -1373,6 +1536,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_MDS: return mds_show_state(buf); + case X86_BUG_TAA: + return tsx_async_abort_show_state(buf); + + case X86_BUG_ITLB_MULTIHIT: + return itlb_multihit_show_state(buf); + default: break; } @@ -1409,4 +1578,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu { return cpu_show_common(dev, attr, buf, X86_BUG_MDS); } + +ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_TAA); +} + +ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 12fa160518713c7f8c60c98fa72ec9755c273dd6..477df9782fdfe5ec4dcce10e8c4a1f6339bb509d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -891,13 +891,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) c->x86_cache_bits = c->x86_phys_bits; } -#define NO_SPECULATION BIT(0) -#define NO_MELTDOWN BIT(1) -#define NO_SSB BIT(2) -#define NO_L1TF BIT(3) -#define NO_MDS BIT(4) -#define MSBDS_ONLY BIT(5) -#define NO_SWAPGS BIT(6) +#define NO_SPECULATION BIT(0) +#define NO_MELTDOWN BIT(1) +#define NO_SSB BIT(2) +#define NO_L1TF BIT(3) +#define NO_MDS BIT(4) +#define MSBDS_ONLY BIT(5) +#define NO_SWAPGS BIT(6) +#define NO_ITLB_MULTIHIT BIT(7) #define VULNWL(_vendor, _family, _model, _whitelist) \ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } @@ -915,26 +916,26 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), /* Intel Family 6 */ - VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION), - VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION), - VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION), - VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), - VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), - - VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), + + VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS 
| NO_ITLB_MULTIHIT), VULNWL_INTEL(CORE_YONAH, NO_SSB), - VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS), - VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS), - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS), + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), /* * Technically, swapgs isn't serializing on AMD (despite it previously @@ -945,13 +946,13 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { */ /* AMD Family 0xf - 0x12 */ - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS), - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS), - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS), - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS), + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS), + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), {} }; @@ -962,19 +963,30 @@ static bool __init cpu_matches(unsigned long which) return m && !!(m->driver_data & which); } -static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) +u64 x86_read_arch_cap_msr(void) { u64 ia32_cap = 0; + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + + return ia32_cap; +} + +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) +{ + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ + if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); + if (cpu_matches(NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); - if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); @@ -991,6 +1003,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (!cpu_matches(NO_SWAPGS)) setup_force_cpu_bug(X86_BUG_SWAPGS); + /* + * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when: + * - TSX is supported or + * - TSX_CTRL is present + * + * TSX_CTRL check is needed for cases when TSX could be disabled before + * the kernel boot e.g. kexec. + * TSX_CTRL check alone is not sufficient for cases when the microcode + * update is not present or running as guest that don't get TSX_CTRL. 
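/*
 * Illustrative sketch (not from the patch itself): the X86_BUG_TAA condition
 * applied just below, restated as a standalone predicate over the relevant
 * CPUID/MSR inputs. cpu_is_taa_affected() is an invented name for this sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#define ARCH_CAP_TSX_CTRL_MSR   (1ULL << 7)
#define ARCH_CAP_TAA_NO         (1ULL << 8)

static bool cpu_is_taa_affected(uint64_t ia32_cap, bool cpuid_has_rtm)
{
        if (ia32_cap & ARCH_CAP_TAA_NO)
                return false;   /* hardware declares itself immune */

        /* RTM enumerated, or TSX was turned off earlier (e.g. before kexec) */
        return cpuid_has_rtm || (ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
}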
+ */ + if (!(ia32_cap & ARCH_CAP_TAA_NO) && + (cpu_has(c, X86_FEATURE_RTM) || + (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) + setup_force_cpu_bug(X86_BUG_TAA); + if (cpu_matches(NO_MELTDOWN)) return; @@ -1409,6 +1436,8 @@ void __init identify_boot_cpu(void) enable_sep_cpu(); #endif cpu_detect_tlb(&boot_cpu_data); + + tsx_init(); } void identify_secondary_cpu(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 2275900d4d1b01210ce5a0dcabef3901a4237e32..4350f50b5deb37fa34ff2361680425019dcdcb4a 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -44,6 +44,22 @@ struct _tlb_table { extern const struct cpu_dev *const __x86_cpu_dev_start[], *const __x86_cpu_dev_end[]; +#ifdef CONFIG_CPU_SUP_INTEL +enum tsx_ctrl_states { + TSX_CTRL_ENABLE, + TSX_CTRL_DISABLE, + TSX_CTRL_NOT_SUPPORTED, +}; + +extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state; + +extern void __init tsx_init(void); +extern void tsx_enable(void); +extern void tsx_disable(void); +#else +static inline void tsx_init(void) { } +#endif /* CONFIG_CPU_SUP_INTEL */ + extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); extern int detect_extended_topology_early(struct cpuinfo_x86 *c); @@ -51,4 +67,6 @@ extern int detect_ht_early(struct cpuinfo_x86 *c); extern void x86_spec_ctrl_setup_ap(void); +extern u64 x86_read_arch_cap_msr(void); + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 311d0fad17e6b1c845b17536a29b89305c2daa92..a4f6e0ec4ba0fd7f3617cdbffa07ace89189d540 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -434,7 +434,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c) /* enable MAPEN */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable cpuid */ - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* disable MAPEN */ setCx86(CX86_CCR3, ccr3); local_irq_restore(flags); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 860f2fd9f5407ec2a40ee3eae337db43042a2094..476a9d5c2f3517d518c78aaa49701ea3b8aa4686 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -642,6 +642,11 @@ static void init_intel(struct cpuinfo_x86 *c) detect_vmx_virtcap(c); init_intel_energy_perf(c); + + if (tsx_ctrl_state == TSX_CTRL_ENABLE) + tsx_enable(); + if (tsx_ctrl_state == TSX_CTRL_DISABLE) + tsx_disable(); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c new file mode 100644 index 0000000000000000000000000000000000000000..3e20d322bc98b636cde2747394d26fcec8783a0c --- /dev/null +++ b/arch/x86/kernel/cpu/tsx.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Transactional Synchronization Extensions (TSX) control. + * + * Copyright (C) 2019 Intel Corporation + * + * Author: + * Pawan Gupta + */ + +#include + +#include + +#include "cpu.h" + +enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED; + +void tsx_disable(void) +{ + u64 tsx; + + rdmsrl(MSR_IA32_TSX_CTRL, tsx); + + /* Force all transactions to immediately abort */ + tsx |= TSX_CTRL_RTM_DISABLE; + + /* + * Ensure TSX support is not enumerated in CPUID. + * This is visible to userspace and will ensure they + * do not waste resources trying TSX transactions that + * will always abort. 
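/*
 * Illustrative sketch (not from the patch itself): the read-modify-write that
 * tsx_disable()/tsx_enable() perform on MSR_IA32_TSX_CTRL (0x122), reduced to
 * the bit arithmetic on the MSR value. The bit definitions mirror the
 * msr-index.h hunk earlier in this patch; tsx_ctrl_value() is an invented
 * helper name.
 */
#include <stdbool.h>
#include <stdint.h>

#define TSX_CTRL_RTM_DISABLE    (1ULL << 0)     /* force RTM transactions to abort */
#define TSX_CTRL_CPUID_CLEAR    (1ULL << 1)     /* hide TSX enumeration in CPUID */

static uint64_t tsx_ctrl_value(uint64_t cur, bool disable)
{
        if (disable)
                return cur | TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR;

        return cur & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR);
}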
+ */ + tsx |= TSX_CTRL_CPUID_CLEAR; + + wrmsrl(MSR_IA32_TSX_CTRL, tsx); +} + +void tsx_enable(void) +{ + u64 tsx; + + rdmsrl(MSR_IA32_TSX_CTRL, tsx); + + /* Enable the RTM feature in the cpu */ + tsx &= ~TSX_CTRL_RTM_DISABLE; + + /* + * Ensure TSX support is enumerated in CPUID. + * This is visible to userspace and will ensure they + * can enumerate and use the TSX feature. + */ + tsx &= ~TSX_CTRL_CPUID_CLEAR; + + wrmsrl(MSR_IA32_TSX_CTRL, tsx); +} + +static bool __init tsx_ctrl_is_supported(void) +{ + u64 ia32_cap = x86_read_arch_cap_msr(); + + /* + * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this + * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. + * + * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a + * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES + * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get + * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, + * tsx= cmdline requests will do nothing on CPUs without + * MSR_IA32_TSX_CTRL support. + */ + return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); +} + +static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) +{ + if (boot_cpu_has_bug(X86_BUG_TAA)) + return TSX_CTRL_DISABLE; + + return TSX_CTRL_ENABLE; +} + +void __init tsx_init(void) +{ + char arg[5] = {}; + int ret; + + if (!tsx_ctrl_is_supported()) + return; + + ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg)); + if (ret >= 0) { + if (!strcmp(arg, "on")) { + tsx_ctrl_state = TSX_CTRL_ENABLE; + } else if (!strcmp(arg, "off")) { + tsx_ctrl_state = TSX_CTRL_DISABLE; + } else if (!strcmp(arg, "auto")) { + tsx_ctrl_state = x86_get_tsx_auto_mode(); + } else { + tsx_ctrl_state = TSX_CTRL_DISABLE; + pr_err("tsx: invalid option, defaulting to off\n"); + } + } else { + /* tsx= not provided */ + if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO)) + tsx_ctrl_state = x86_get_tsx_auto_mode(); + else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF)) + tsx_ctrl_state = TSX_CTRL_DISABLE; + else + tsx_ctrl_state = TSX_CTRL_ENABLE; + } + + if (tsx_ctrl_state == TSX_CTRL_DISABLE) { + tsx_disable(); + + /* + * tsx_disable() will change the state of the + * RTM CPUID bit. Clear it here since it is now + * expected to be not set. + */ + setup_clear_cpu_cap(X86_FEATURE_RTM); + } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) { + + /* + * HW defaults TSX to be enabled at bootup. + * We may still need the TSX enable support + * during init for special cases like + * kexec after TSX is disabled. + */ + tsx_enable(); + + /* + * tsx_enable() will change the state of the + * RTM CPUID bit. Force it here since it is now + * expected to be set. 
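/*
 * Illustrative sketch (not from the patch itself): the "tsx=" command line
 * handling done by tsx_init() above, including the Kconfig-driven default,
 * modelled as a standalone state machine. The enum and function names are
 * invented for this sketch; the decisions mirror the code above.
 */
#include <stdbool.h>
#include <string.h>

enum tsx_state { TSX_ENABLE, TSX_DISABLE, TSX_NOT_SUPPORTED };

static enum tsx_state tsx_auto(bool has_taa_bug)
{
        /* "auto" disables TSX only where keeping it on would expose TAA */
        return has_taa_bug ? TSX_DISABLE : TSX_ENABLE;
}

static enum tsx_state tsx_pick_state(const char *arg, bool has_taa_bug,
                                     bool kconfig_auto, bool kconfig_off)
{
        if (arg) {
                if (!strcmp(arg, "on"))
                        return TSX_ENABLE;
                if (!strcmp(arg, "auto"))
                        return tsx_auto(has_taa_bug);
                /* "off" and unrecognised values both disable TSX */
                return TSX_DISABLE;
        }

        /* No "tsx=" given: fall back to the compile-time default */
        if (kconfig_auto)
                return tsx_auto(has_taa_bug);
        return kconfig_off ? TSX_DISABLE : TSX_ENABLE;
}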
+ */ + setup_force_cpu_cap(X86_FEATURE_RTM); + } +} diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3f3cfeca1083e3da068f69d547e26765a5a12a2c..dcd6df5943d60ae5e89110a0fea48ba767c12aaa 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -376,6 +376,10 @@ int __copy_instruction(u8 *dest, u8 *src) return 0; memcpy(dest, insn.kaddr, length); + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(&insn)) + return 0; + #ifdef CONFIG_X86_64 if (insn_rip_relative(&insn)) { s64 newdisp; diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 2863ad3066921693210b39b18763de8cae65910d..33ba47c44816bc2378a2caa8c2c5bbc07816d520 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void) irq_exit(); } +static int register_stop_handler(void) +{ + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, + NMI_FLAG_FIRST, "smp_stop"); +} + static void native_stop_other_cpus(int wait) { unsigned long flags; @@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait) apic->send_IPI_allbutself(REBOOT_VECTOR); /* - * Don't wait longer than a second if the caller - * didn't ask us to wait. + * Don't wait longer than a second for IPI completion. The + * wait request is not checked here because that would + * prevent an NMI shutdown attempt in case that not all + * CPUs reach shutdown state. */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && (wait || timeout--)) + while (num_online_cpus() > 1 && timeout--) udelay(1); } - - /* if the REBOOT_VECTOR didn't work, try with the NMI */ - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, - NMI_FLAG_FIRST, "smp_stop")) - /* Note: we ignore failures here */ - /* Hope the REBOOT_IRQ is good enough */ - goto finish; - - /* sync above data before sending IRQ */ - wmb(); - pr_emerg("Shutting down cpus with NMI\n"); + /* if the REBOOT_VECTOR didn't work, try with the NMI */ + if (num_online_cpus() > 1) { + /* + * If NMI IPI is enabled, try to register the stop handler + * and send the IPI. In any case try to wait for the other + * CPUs to stop. + */ + if (!smp_no_nmi_ipi && !register_stop_handler()) { + /* Sync above data before sending IRQ */ + wmb(); - apic->send_IPI_allbutself(NMI_VECTOR); + pr_emerg("Shutting down cpus with NMI\n"); + apic->send_IPI_allbutself(NMI_VECTOR); + } /* - * Don't wait longer than a 10 ms if the caller - * didn't ask us to wait. + * Don't wait longer than 10 ms if the caller didn't + * reqeust it. If wait is true, the machine hangs here if + * one or more CPUs do not reach shutdown state. 
*/ timeout = USEC_PER_MSEC * 10; while (num_online_cpus() > 1 && (wait || timeout--)) udelay(1); } -finish: local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index e35466afe989da4dbac2e70de288d885f8f1957a..73391c1bd2a9a007cb621d670fc08d3ca5a4d27d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool if (is_prefix_bad(insn)) return -ENOTSUPP; + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(insn)) + return -ENOTSUPP; + if (x86_64) good_insns = good_insns_64; else @@ -983,7 +987,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " "%%ip=%#lx\n", current->pid, regs->sp, regs->ip); - force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); + force_sig(SIGSEGV, current); } return -1; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index fc8236fd249500ec2aaa75827d8af83935a880cb..18c5b4920e92a9c419a582ba4d0c76b8fd5968db 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -466,8 +466,16 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* PKU is not yet implemented for shadow paging. */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); + entry->edx &= kvm_cpuid_7_0_edx_x86_features; cpuid_mask(&entry->edx, CPUID_7_EDX); + if (boot_cpu_has(X86_FEATURE_IBPB) && + boot_cpu_has(X86_FEATURE_IBRS)) + entry->edx |= F(SPEC_CTRL); + if (boot_cpu_has(X86_FEATURE_STIBP)) + entry->edx |= F(INTEL_STIBP); + if (boot_cpu_has(X86_FEATURE_SSBD)) + entry->edx |= F(SPEC_CTRL_SSBD); /* * We emulate ARCH_CAPABILITIES in software even * if the host doesn't support it. diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b636a1e849fdad3423669beffe1a1a1ab5d32f3c..660c35f854f8b6d11d2735512a5b0e8f9591de06 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5257,6 +5257,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->memopp->addr.mem.ea + ctxt->_eip); done: + if (rc == X86EMUL_PROPAGATE_FAULT) + ctxt->have_exception = true; return (rc != X86EMUL_CONTINUE) ? 
EMULATION_FAILED : EMULATION_OK; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 676edfc19a95da032d8e83ff2194026b796e20c8..3a281a2decdec3571b4b486c2bb0fc7acbde5df9 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,30 @@ #include #include +extern bool itlb_multihit_kvm_mitigation; + +static int __read_mostly nx_huge_pages = -1; +static uint __read_mostly nx_huge_pages_recovery_ratio = 60; + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp); +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp); + +static struct kernel_param_ops nx_huge_pages_ops = { + .set = set_nx_huge_pages, + .get = param_get_bool, +}; + +static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = { + .set = set_nx_huge_pages_recovery_ratio, + .get = param_get_uint, +}; + +module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644); +__MODULE_PARM_TYPE(nx_huge_pages, "bool"); +module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops, + &nx_huge_pages_recovery_ratio, 0644); +__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint"); + /* * When setting this variable to true it enables Two-Dimensional-Paging * where the hardware walks 2 page tables: @@ -131,9 +156,6 @@ module_param(dbg, bool, 0644); #include -#define CREATE_TRACE_POINTS -#include "mmutrace.h" - #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) #define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) @@ -142,6 +164,20 @@ module_param(dbg, bool, 0644); /* make pte_list_desc fit well in cache line */ #define PTE_LIST_EXT 3 +/* + * Return values of handle_mmio_page_fault and mmu.page_fault: + * RET_PF_RETRY: let CPU fault again on the address. + * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For handle_mmio_page_fault only: + * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. + */ +enum { + RET_PF_RETRY = 0, + RET_PF_EMULATE = 1, + RET_PF_INVALID = 2, +}; + struct pte_list_desc { u64 *sptes[PTE_LIST_EXT]; struct pte_list_desc *more; @@ -179,14 +215,23 @@ static u64 __read_mostly shadow_mmio_mask; static u64 __read_mostly shadow_present_mask; static void mmu_spte_set(u64 *sptep, u64 spte); +static bool is_executable_pte(u64 spte); static void mmu_free_roots(struct kvm_vcpu *vcpu); +#define CREATE_TRACE_POINTS +#include "mmutrace.h" + void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask) { shadow_mmio_mask = mmio_mask; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); +static bool is_nx_huge_page_enabled(void) +{ + return READ_ONCE(nx_huge_pages); +} + /* * the low bit of the generation number is always presumed to be zero. * This disables mmio caching during memslot updates. 
The concept is @@ -324,6 +369,11 @@ static int is_last_spte(u64 pte, int level) return 0; } +static bool is_executable_pte(u64 spte) +{ + return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; +} + static kvm_pfn_t spte_to_pfn(u64 pte) { return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; @@ -767,10 +817,16 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) { - if (sp->role.direct) - BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); - else + if (!sp->role.direct) { sp->gfns[index] = gfn; + return; + } + + if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) + pr_err_ratelimited("gfn mismatch under direct page %llx " + "(expected %llx, got %llx)\n", + sp->gfn, + kvm_mmu_page_get_gfn(sp, index), gfn); } /* @@ -829,6 +885,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_gfn_disallow_lpage(slot, gfn); } +static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + if (sp->lpage_disallowed) + return; + + ++kvm->stat.nx_lpage_splits; + list_add_tail(&sp->lpage_disallowed_link, + &kvm->arch.lpage_disallowed_mmu_pages); + sp->lpage_disallowed = true; +} + static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) { struct kvm_memslots *slots; @@ -846,6 +913,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_gfn_allow_lpage(slot, gfn); } +static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + --kvm->stat.nx_lpage_splits; + sp->lpage_disallowed = false; + list_del(&sp->lpage_disallowed_link); +} + static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level, struct kvm_memory_slot *slot) { @@ -2382,6 +2456,9 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, kvm_reload_remote_mmus(kvm); } + if (sp->lpage_disallowed) + unaccount_huge_nx_page(kvm, sp); + sp->role.invalid = 1; return ret; } @@ -2533,6 +2610,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (!speculative) spte |= shadow_accessed_mask; + if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) && + is_nx_huge_page_enabled()) { + pte_access &= ~ACC_EXEC_MASK; + } + if (pte_access & ACC_EXEC_MASK) spte |= shadow_x_mask; else @@ -2598,13 +2680,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, return ret; } -static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, - int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, - bool speculative, bool host_writable) +static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, + int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, + bool speculative, bool host_writable) { int was_rmapped = 0; int rmap_count; - bool emulate = false; + int ret = RET_PF_RETRY; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); @@ -2634,18 +2716,15 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, true, host_writable)) { if (write_fault) - emulate = true; + ret = RET_PF_EMULATE; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } if (unlikely(is_mmio_spte(*sptep))) - emulate = true; + ret = RET_PF_EMULATE; pgprintk("%s: setting spte %llx\n", __func__, *sptep); - pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", - is_large_pte(*sptep)? 
"2MB" : "4kB", - *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, - *sptep, sptep); + trace_kvm_mmu_set_spte(level, gfn, sptep); if (!was_rmapped && is_large_pte(*sptep)) ++vcpu->kvm->stat.lpages; @@ -2657,9 +2736,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, } } - kvm_release_pfn_clean(pfn); - - return emulate; + return ret; } static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, @@ -2693,9 +2770,11 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, if (ret <= 0) return -1; - for (i = 0; i < ret; i++, gfn++, start++) + for (i = 0; i < ret; i++, gfn++, start++) { mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, page_to_pfn(pages[i]), true, true); + put_page(pages[i]); + } return 0; } @@ -2743,40 +2822,71 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) __direct_pte_prefetch(vcpu, sp, sptep); } -static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, - int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault) +static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, + gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) { - struct kvm_shadow_walk_iterator iterator; + int level = *levelp; + u64 spte = *it.sptep; + + if (it.level == level && level > PT_PAGE_TABLE_LEVEL && + is_nx_huge_page_enabled() && + is_shadow_present_pte(spte) && + !is_large_pte(spte)) { + /* + * A small SPTE exists for this pfn, but FNAME(fetch) + * and __direct_map would like to create a large PTE + * instead: just force them to go down another level, + * patching back for them into pfn the next 9 bits of + * the address. + */ + u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); + *pfnp |= gfn & page_mask; + (*levelp)--; + } +} + +static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write, + int map_writable, int level, kvm_pfn_t pfn, + bool prefault, bool lpage_disallowed) +{ + struct kvm_shadow_walk_iterator it; struct kvm_mmu_page *sp; - int emulate = 0; - gfn_t pseudo_gfn; + int ret; + gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t base_gfn = gfn; if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) - return 0; + return RET_PF_RETRY; - for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { - if (iterator.level == level) { - emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, - write, level, gfn, pfn, prefault, - map_writable); - direct_pte_prefetch(vcpu, iterator.sptep); - ++vcpu->stat.pf_fixed; - break; - } + trace_kvm_mmu_spte_requested(gpa, level, pfn); + for_each_shadow_entry(vcpu, gpa, it) { + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. 
+ */ + disallowed_hugepage_adjust(it, gfn, &pfn, &level); - drop_large_spte(vcpu, iterator.sptep); - if (!is_shadow_present_pte(*iterator.sptep)) { - u64 base_addr = iterator.addr; + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == level) + break; - base_addr &= PT64_LVL_ADDR_MASK(iterator.level); - pseudo_gfn = base_addr >> PAGE_SHIFT; - sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, - iterator.level - 1, 1, ACC_ALL); + drop_large_spte(vcpu, it.sptep); + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, + it.level - 1, true, ACC_ALL); - link_shadow_page(vcpu, iterator.sptep, sp); + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); } } - return emulate; + + ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, + write, level, base_gfn, pfn, prefault, + map_writable); + direct_pte_prefetch(vcpu, it.sptep); + ++vcpu->stat.pf_fixed; + return ret; } static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) @@ -2798,25 +2908,23 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) * Do not cache the mmio info caused by writing the readonly gfn * into the spte otherwise read access on readonly gfn also can * caused mmio page fault and treat it as mmio access. - * Return 1 to tell kvm to emulate it. */ if (pfn == KVM_PFN_ERR_RO_FAULT) - return 1; + return RET_PF_EMULATE; if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); - return 0; + return RET_PF_RETRY; } return -EFAULT; } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, - gfn_t *gfnp, kvm_pfn_t *pfnp, + gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) { kvm_pfn_t pfn = *pfnp; - gfn_t gfn = *gfnp; int level = *levelp; /* @@ -2826,7 +2934,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, * here. 
*/ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && - level == PT_PAGE_TABLE_LEVEL && + !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompoundMap(pfn_to_page(pfn)) && !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; @@ -2843,8 +2951,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, mask = KVM_PAGES_PER_HPAGE(level) - 1; VM_BUG_ON((gfn & mask) != (pfn & mask)); if (pfn & mask) { - gfn &= ~mask; - *gfnp = gfn; kvm_release_pfn_clean(pfn); pfn &= ~mask; kvm_get_pfn(pfn); @@ -3012,11 +3118,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, { int r; int level; - bool force_pt_level = false; + bool force_pt_level; kvm_pfn_t pfn; unsigned long mmu_seq; bool map_writable, write = error_code & PFERR_WRITE_MASK; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + force_pt_level = lpage_disallowed; level = mapping_level(vcpu, gfn, &force_pt_level); if (likely(!force_pt_level)) { /* @@ -3031,32 +3140,30 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, } if (fast_page_fault(vcpu, v, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) return r; + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; make_mmu_pages_available(vcpu); if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); - r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; - + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, v, write, map_writable, level, pfn, + prefault, false); out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return r; } @@ -3383,38 +3490,38 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) return reserved; } -int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) +static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; bool reserved; if (mmio_info_in_cache(vcpu, addr, direct)) - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); if (WARN_ON(reserved)) - return RET_MMIO_PF_BUG; + return -EINVAL; if (is_mmio_spte(spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); unsigned access = get_mmio_spte_access(spte); if (!check_mmio_spte(vcpu, spte)) - return RET_MMIO_PF_INVALID; + return RET_PF_INVALID; if (direct) addr = 0; trace_handle_mmio_page_fault(addr, gfn, access); vcpu_cache_mmio_info(vcpu, addr, gfn, access); - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; } /* * If the page table is zapped by other cpus, let CPU fault again on * the address. 
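/*
 * Illustrative sketch (not from the patch itself): how the new RET_PF_*
 * values are consumed, per the kvm_mmu_page_fault() hunk further below.
 * Names are simplified stand-ins; only the dispatch logic is shown.
 */
enum { PF_RETRY = 0, PF_EMULATE = 1, PF_INVALID = 2 };

/* <0: propagate the error; 1: re-enter the guest; 0: fall through to emulation */
static int dispatch_page_fault_result(int r)
{
        if (r == PF_RETRY)
                return 1;
        if (r < 0)
                return r;
        return 0;               /* PF_EMULATE */
}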
*/ - return RET_MMIO_PF_RETRY; + return RET_PF_RETRY; } EXPORT_SYMBOL_GPL(handle_mmio_page_fault); @@ -3464,7 +3571,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) @@ -3548,18 +3655,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, unsigned long mmu_seq; int write = error_code & PFERR_WRITE_MASK; bool map_writable; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) return r; - force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn, - PT_DIRECTORY_LEVEL); + force_pt_level = + lpage_disallowed || + !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL); level = mapping_level(vcpu, gfn, &force_pt_level); if (likely(!force_pt_level)) { if (level > PT_DIRECTORY_LEVEL && @@ -3569,32 +3679,30 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, } if (fast_page_fault(vcpu, gpa, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) return r; + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; make_mmu_pages_available(vcpu); if (likely(!force_pt_level)) - transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); - r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; - + transparent_hugepage_adjust(vcpu, gfn, &pfn, &level); + r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, + prefault, lpage_disallowed); out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return r; } static void nonpaging_init_context(struct kvm_vcpu *vcpu, @@ -4510,23 +4618,24 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, enum emulation_result er; bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu); + r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { r = handle_mmio_page_fault(vcpu, cr2, direct); - if (r == RET_MMIO_PF_EMULATE) { + if (r == RET_PF_EMULATE) { emulation_type = 0; goto emulate; } - if (r == RET_MMIO_PF_RETRY) - return 1; - if (r < 0) - return r; } - r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); + if (r == RET_PF_INVALID) { + r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); + WARN_ON(r == RET_PF_INVALID); + } + + if (r == RET_PF_RETRY) + return 1; if (r < 0) return r; - if (!r) - return 1; if (mmio_info_in_cache(vcpu, cr2, direct)) emulation_type = 0; @@ -4781,9 +4890,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, * the guest, and the guest page table is using 4K page size * mapping if the indirect sp has level = 1. 
*/ - if (sp->role.direct && - !kvm_is_reserved_pfn(pfn) && - PageTransCompoundMap(pfn_to_page(pfn))) { + if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && + !kvm_is_zone_device_pfn(pfn) && + PageTransCompoundMap(pfn_to_page(pfn))) { drop_spte(kvm, sptep); need_tlb_flush = 1; goto restart; @@ -4965,7 +5074,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) int nr_to_scan = sc->nr_to_scan; unsigned long freed = 0; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { int idx; @@ -5015,7 +5124,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) break; } - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); return freed; } @@ -5039,8 +5148,58 @@ static void mmu_destroy_caches(void) kmem_cache_destroy(mmu_page_header_cache); } +static bool get_nx_auto_mode(void) +{ + /* Return true when CPU has the bug, and mitigations are ON */ + return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); +} + +static void __set_nx_huge_pages(bool val) +{ + nx_huge_pages = itlb_multihit_kvm_mitigation = val; +} + +static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) +{ + bool old_val = nx_huge_pages; + bool new_val; + + /* In "auto" mode deploy workaround only if CPU has the bug. */ + if (sysfs_streq(val, "off")) + new_val = 0; + else if (sysfs_streq(val, "force")) + new_val = 1; + else if (sysfs_streq(val, "auto")) + new_val = get_nx_auto_mode(); + else if (strtobool(val, &new_val) < 0) + return -EINVAL; + + __set_nx_huge_pages(new_val); + + if (new_val != old_val) { + struct kvm *kvm; + int idx; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) { + idx = srcu_read_lock(&kvm->srcu); + kvm_mmu_invalidate_zap_all_pages(kvm); + srcu_read_unlock(&kvm->srcu, idx); + + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + } + mutex_unlock(&kvm_lock); + } + + return 0; +} + int kvm_mmu_module_init(void) { + if (nx_huge_pages == -1) + __set_nx_huge_pages(get_nx_auto_mode()); + pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), 0, SLAB_ACCOUNT, NULL); @@ -5104,3 +5263,116 @@ void kvm_mmu_module_exit(void) unregister_shrinker(&mmu_shrinker); mmu_audit_disable(); } + +static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) +{ + unsigned int old_val; + int err; + + old_val = nx_huge_pages_recovery_ratio; + err = param_set_uint(val, kp); + if (err) + return err; + + if (READ_ONCE(nx_huge_pages) && + !old_val && nx_huge_pages_recovery_ratio) { + struct kvm *kvm; + + mutex_lock(&kvm_lock); + + list_for_each_entry(kvm, &vm_list, vm_list) + wake_up_process(kvm->arch.nx_lpage_recovery_thread); + + mutex_unlock(&kvm_lock); + } + + return err; +} + +static void kvm_recover_nx_lpages(struct kvm *kvm) +{ + int rcu_idx; + struct kvm_mmu_page *sp; + unsigned int ratio; + LIST_HEAD(invalid_list); + ulong to_zap; + + rcu_idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; + while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { + /* + * We use a separate list instead of just using active_mmu_pages + * because the number of lpage_disallowed pages is expected to + * be relatively small compared to the total. 
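/*
 * Illustrative sketch (not from the patch itself): the pacing used by the
 * kvm-nx-lpage-recovery worker in this hunk. Each wake-up zaps roughly
 * 1/ratio of the currently disallowed large pages, and the worker sleeps so
 * that wake-ups happen about once a minute while both the mitigation and the
 * ratio are non-zero. Function names are invented; the -1 return models
 * MAX_SCHEDULE_TIMEOUT (sleep until explicitly woken).
 */
#include <stdint.h>

static uint64_t lpages_to_zap(uint64_t nx_lpage_splits, unsigned int ratio)
{
        if (!ratio)
                return 0;
        return (nx_lpage_splits + ratio - 1) / ratio;   /* DIV_ROUND_UP */
}

static int64_t recovery_sleep_jiffies(uint64_t start, uint64_t now,
                                      unsigned int hz, int mitigation_active)
{
        if (!mitigation_active)
                return -1;      /* sleep indefinitely, like MAX_SCHEDULE_TIMEOUT */
        return (int64_t)(start + 60ULL * hz - now);
}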
+ */ + sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, + struct kvm_mmu_page, + lpage_disallowed_link); + WARN_ON_ONCE(!sp->lpage_disallowed); + kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); + WARN_ON_ONCE(sp->lpage_disallowed); + + if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) { + kvm_mmu_commit_zap_page(kvm, &invalid_list); + if (to_zap) + cond_resched_lock(&kvm->mmu_lock); + } + } + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +} + +static long get_nx_lpage_recovery_timeout(u64 start_time) +{ + return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) + ? start_time + 60 * HZ - get_jiffies_64() + : MAX_SCHEDULE_TIMEOUT; +} + +static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) +{ + u64 start_time; + long remaining_time; + + while (true) { + start_time = get_jiffies_64(); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop() && remaining_time > 0) { + schedule_timeout(remaining_time); + remaining_time = get_nx_lpage_recovery_timeout(start_time); + set_current_state(TASK_INTERRUPTIBLE); + } + + set_current_state(TASK_RUNNING); + + if (kthread_should_stop()) + return 0; + + kvm_recover_nx_lpages(kvm); + } +} + +int kvm_mmu_post_init_vm(struct kvm *kvm) +{ + int err; + + err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, + "kvm-nx-lpage-recovery", + &kvm->arch.nx_lpage_recovery_thread); + if (!err) + kthread_unpark(kvm->arch.nx_lpage_recovery_thread); + + return err; +} + +void kvm_mmu_pre_destroy_vm(struct kvm *kvm) +{ + if (kvm->arch.nx_lpage_recovery_thread) + kthread_stop(kvm->arch.nx_lpage_recovery_thread); +} diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index c92834c55c59ea43b0c34c57398ddb201b7d6281..e584689e7d469dbd9ba71dad5ed51e8c78d32558 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -56,23 +56,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); -/* - * Return values of handle_mmio_page_fault: - * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction - * directly. - * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page - * fault path update the mmio spte. - * RET_MMIO_PF_RETRY: let CPU fault again on the address. - * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). 
- */ -enum { - RET_MMIO_PF_EMULATE = 1, - RET_MMIO_PF_INVALID = 2, - RET_MMIO_PF_RETRY = 0, - RET_MMIO_PF_BUG = -1 -}; - -int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); @@ -202,4 +185,8 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); + +int kvm_mmu_post_init_vm(struct kvm *kvm); +void kvm_mmu_pre_destroy_vm(struct kvm *kvm); + #endif diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 5a24b846a1cb8cf2d7d66bcdb275c50c2f483626..756b14ecc957a8ce337a60eae320ccf87b7c7da7 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -322,6 +322,65 @@ TRACE_EVENT( __entry->kvm_gen == __entry->spte_gen ) ); + +TRACE_EVENT( + kvm_mmu_set_spte, + TP_PROTO(int level, gfn_t gfn, u64 *sptep), + TP_ARGS(level, gfn, sptep), + + TP_STRUCT__entry( + __field(u64, gfn) + __field(u64, spte) + __field(u64, sptep) + __field(u8, level) + /* These depend on page entry type, so compute them now. */ + __field(bool, r) + __field(bool, x) + __field(u8, u) + ), + + TP_fast_assign( + __entry->gfn = gfn; + __entry->spte = *sptep; + __entry->sptep = virt_to_phys(sptep); + __entry->level = level; + __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK); + __entry->x = is_executable_pte(__entry->spte); + __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1; + ), + + TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx", + __entry->gfn, __entry->spte, + __entry->r ? "r" : "-", + __entry->spte & PT_PRESENT_MASK ? "w" : "-", + __entry->x ? "x" : "-", + __entry->u == -1 ? "" : (__entry->u ? 
"u" : "-"), + __entry->level, __entry->sptep + ) +); + +TRACE_EVENT( + kvm_mmu_spte_requested, + TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn), + TP_ARGS(addr, level, pfn), + + TP_STRUCT__entry( + __field(u64, gfn) + __field(u64, pfn) + __field(u8, level) + ), + + TP_fast_assign( + __entry->gfn = addr >> PAGE_SHIFT; + __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); + __entry->level = level; + ), + + TP_printk("gfn %llx pfn %llx level %d", + __entry->gfn, __entry->pfn, __entry->level + ) +); + #endif /* _TRACE_KVMMMU_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 37363900297d3b4730424fdf6353fe8929c45c80..e03225e707b260d4a3c5f9e7f7dd3fc3a84507c0 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -499,6 +499,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true); + kvm_release_pfn_clean(pfn); return true; } @@ -572,12 +573,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct guest_walker *gw, int write_fault, int hlevel, - kvm_pfn_t pfn, bool map_writable, bool prefault) + kvm_pfn_t pfn, bool map_writable, bool prefault, + bool lpage_disallowed) { struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; unsigned direct_access, access = gw->pt_access; - int top_level, emulate; + int top_level, ret; + gfn_t gfn, base_gfn; direct_access = gw->pte_access; @@ -622,36 +625,49 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, link_shadow_page(vcpu, it.sptep, sp); } - for (; - shadow_walk_okay(&it) && it.level > hlevel; - shadow_walk_next(&it)) { - gfn_t direct_gfn; + /* + * FNAME(page_fault) might have clobbered the bottom bits of + * gw->gfn, restore them from the virtual address. + */ + gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT); + base_gfn = gfn; + trace_kvm_mmu_spte_requested(addr, gw->level, pfn); + + for (; shadow_walk_okay(&it); shadow_walk_next(&it)) { clear_sp_write_flooding_count(it.sptep); - validate_direct_spte(vcpu, it.sptep, direct_access); - drop_large_spte(vcpu, it.sptep); + /* + * We cannot overwrite existing page tables with an NX + * large page, as the leaf could be executable. 
+ */ + disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel); - if (is_shadow_present_pte(*it.sptep)) - continue; + base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + if (it.level == hlevel) + break; - direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); + validate_direct_spte(vcpu, it.sptep, direct_access); - sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, - true, direct_access); - link_shadow_page(vcpu, it.sptep, sp); + drop_large_spte(vcpu, it.sptep); + + if (!is_shadow_present_pte(*it.sptep)) { + sp = kvm_mmu_get_page(vcpu, base_gfn, addr, + it.level - 1, true, direct_access); + link_shadow_page(vcpu, it.sptep, sp); + if (lpage_disallowed) + account_huge_nx_page(vcpu->kvm, sp); + } } - clear_sp_write_flooding_count(it.sptep); - emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, gw->gfn, pfn, prefault, map_writable); + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, + it.level, base_gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); - - return emulate; + ++vcpu->stat.pf_fixed; + return ret; out_gpte_changed: - kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } /* @@ -717,9 +733,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, int r; kvm_pfn_t pfn; int level = PT_PAGE_TABLE_LEVEL; - bool force_pt_level = false; unsigned long mmu_seq; bool map_writable, is_self_change_mapping; + bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) && + is_nx_huge_page_enabled(); + bool force_pt_level = lpage_disallowed; pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); @@ -746,12 +764,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (!prefault) inject_page_fault(vcpu, &walker.fault); - return 0; + return RET_PF_RETRY; } if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { shadow_page_table_clear_flood(vcpu, addr); - return 1; + return RET_PF_EMULATE; } vcpu->arch.write_fault_to_shadow_pgtable = false; @@ -773,7 +791,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 
0 : addr, walker.gfn, pfn, walker.pte_access, &r)) @@ -799,6 +817,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, walker.pte_access &= ~ACC_EXEC_MASK; } + r = RET_PF_RETRY; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; @@ -806,19 +825,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); make_mmu_pages_available(vcpu); if (!force_pt_level) - transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); + transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level); r = FNAME(fetch)(vcpu, addr, &walker, write_fault, - level, pfn, map_writable, prefault); - ++vcpu->stat.pf_fixed; + level, pfn, map_writable, prefault, lpage_disallowed); kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); - spin_unlock(&vcpu->kvm->mmu_lock); - - return r; out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return r; } static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f7a7b98b3271a6fc5df234854d86174c8222a5a0..1079228e4fefffbf8a2abbd69244d7e82dbc1c08 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -590,8 +590,14 @@ static int get_npt_level(void) static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) { vcpu->arch.efer = efer; - if (!npt_enabled && !(efer & EFER_LMA)) - efer &= ~EFER_LME; + + if (!npt_enabled) { + /* Shadow paging assumes NX to be available. */ + efer |= EFER_NX; + + if (!(efer & EFER_LMA)) + efer &= ~EFER_LME; + } to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6e36152694f72be039fd9b9d3091c76eb2838add..8e78252973c4d762cff52ba89a5ee6cd3a96cea8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1547,7 +1547,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) return -1; } -static inline void __invvpid(int ext, u16 vpid, gva_t gva) +static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; @@ -1561,7 +1561,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) : : "a"(&operand), "c"(ext) : "cc", "memory"); } -static inline void __invept(int ext, u64 eptp, gpa_t gpa) +static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; @@ -2219,17 +2219,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) u64 guest_efer = vmx->vcpu.arch.efer; u64 ignore_bits = 0; - if (!enable_ept) { - /* - * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing - * host CPUID is more efficient than testing guest CPUID - * or CR4. Host SMEP is anyway a requirement for guest SMEP. - */ - if (boot_cpu_has(X86_FEATURE_SMEP)) - guest_efer |= EFER_NX; - else if (!(guest_efer & EFER_NX)) - ignore_bits |= EFER_NX; - } + /* Shadow paging assumes NX to be available. */ + if (!enable_ept) + guest_efer |= EFER_NX; /* * LMA and LME handled by hardware; SCE meaningless outside long mode. 
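The EFER changes above in svm.c and vmx.c share one idea: when EPT/NPT is disabled, KVM's shadow page tables may set the NX bit in SPTEs (for example to split huge pages for the iTLB multihit mitigation), and an NX bit is a reserved bit unless EFER.NXE is enabled, so the guest must always run with NX forced on under shadow paging. A minimal sketch of that adjustment, assuming EFER.NXE is architectural bit 11; the helper name is illustrative, not the kernel's:

	typedef unsigned long long u64;		/* stand-in for the kernel's u64 */

	#define EFER_NX_BIT (1ULL << 11)	/* architectural IA32_EFER.NXE */

	static u64 adjust_guest_efer(u64 efer, int tdp_enabled)
	{
		/* Shadow paging may set NX in shadow PTEs, so run the guest with NX. */
		if (!tdp_enabled)
			efer |= EFER_NX_BIT;
		return efer;
	}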
@@ -6556,16 +6548,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) NULL, 0) == EMULATE_DONE; } - ret = handle_mmio_page_fault(vcpu, gpa, true); - if (likely(ret == RET_MMIO_PF_EMULATE)) - return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == - EMULATE_DONE; - - if (unlikely(ret == RET_MMIO_PF_INVALID)) - return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); - - if (unlikely(ret == RET_MMIO_PF_RETRY)) - return 1; + ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); + if (ret >= 0) + return ret; /* It is the real ept misconfig */ WARN_ON(1); @@ -7639,6 +7624,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; + struct x86_exception e; if (!nested_vmx_check_permission(vcpu) || !nested_vmx_check_vmcs12(vcpu)) @@ -7665,8 +7651,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu) vmx_instruction_info, true, &gva)) return 1; /* _system ok, as nested_vmx_check_permission verified cpl=0 */ - kvm_write_guest_virt_system(vcpu, gva, &field_value, - (is_long_mode(vcpu) ? 8 : 4), NULL); + if (kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), + &e)) + kvm_inject_page_fault(vcpu, &e); } nested_vmx_succeed(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bbecbf2b1f5edc731046d16486f15e34cb8b9aec..06cd710e1d45c33ddc4bca551dbf4ed79cc8d8b4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -191,6 +191,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, + { "nx_largepages_splitted", VM_STAT(nx_lpage_splits) }, { NULL } }; @@ -535,8 +536,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, data, offset, len, access); } +static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) +{ + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | + rsvd_bits(1, 2); +} + /* - * Load the pae pdptrs. Return true is they are all valid. + * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { @@ -555,8 +562,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && - (pdpte[i] & - vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { + (pdpte[i] & pdptr_rsvd_bits(vcpu))) { ret = 0; goto out; } @@ -582,7 +588,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu) gfn_t gfn; int r; - if (is_long_mode(vcpu) || !is_pae(vcpu)) + if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, @@ -1026,6 +1032,14 @@ u64 kvm_get_arch_capabilities(void) rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data); + /* + * If nx_huge_pages is enabled, KVM's shadow paging will ensure that + * the nested hypervisor runs with NX huge pages. If it is not, + * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other + * L1 guests, so it need not worry about its own (L2) guests. + */ + data |= ARCH_CAP_PSCHANGE_MC_NO; + /* * If we're doing cache flushes (either "always" or "cond") * we will do one whenever the guest does a vmlaunch/vmresume. 
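The kvm_get_arch_capabilities() hunk above makes KVM always advertise PSCHANGE_MC_NO in the emulated IA32_ARCH_CAPABILITIES: with nx_huge_pages enabled, the host's shadow paging already prevents the page-size-change machine check, so an L1 guest gains nothing from deploying the mitigation itself. A rough sketch of how a consumer tests that capability, assuming the IF_PSCHANGE_MC_NO bit position from Intel's MSR definition; the helper is illustrative, not kernel API:

	#define ARCH_CAP_PSCHANGE_MC_NO (1ULL << 6)	/* IF_PSCHANGE_MC_NO, assumed bit 6 */

	/* Sketch: does this (virtual) CPU still need the iTLB multihit workaround? */
	static int needs_itlb_multihit_workaround(unsigned long long arch_capabilities)
	{
		return !(arch_capabilities & ARCH_CAP_PSCHANGE_MC_NO);
	}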
@@ -1038,8 +1052,35 @@ u64 kvm_get_arch_capabilities(void) if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + data |= ARCH_CAP_RDCL_NO; + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + data |= ARCH_CAP_SSB_NO; + if (!boot_cpu_has_bug(X86_BUG_MDS)) + data |= ARCH_CAP_MDS_NO; + + /* + * On TAA affected systems, export MDS_NO=0 when: + * - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1. + * - Updated microcode is present. This is detected by + * the presence of ARCH_CAP_TSX_CTRL_MSR and ensures + * that VERW clears CPU buffers. + * + * When MDS_NO=0 is exported, guests deploy clear CPU buffer + * mitigation and don't complain: + * + * "Vulnerable: Clear CPU buffers attempted, no microcode" + * + * If TSX is disabled on the system, guests are also mitigated against + * TAA and clear CPU buffer mitigation is not required for guests. + */ + if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) && + (data & ARCH_CAP_TSX_CTRL_MSR)) + data &= ~ARCH_CAP_MDS_NO; + return data; } + EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); static int kvm_get_msr_feature(struct kvm_msr_entry *msr) @@ -4620,6 +4661,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, /* kvm_write_guest_virt_system can pull in tons of pages. */ vcpu->arch.l1tf_flush_l1d = true; + /* + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED + * is returned, but our callers are not ready for that and they blindly + * call kvm_inject_page_fault. Ensure that they at least do not leak + * uninitialized kernel stack memory into cr2 and error code. + */ + memset(exception, 0, sizeof(*exception)); return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); } @@ -5757,8 +5805,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; - if (ctxt->have_exception && inject_emulated_exception(vcpu)) + if (ctxt->have_exception) { + /* + * #UD should result in just EMULATION_FAILED, and trap-like + * exception should not be encountered during decode. 
+ */ + WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || + exception_type(ctxt->exception.vector) == EXCPT_TRAP); + inject_emulated_exception(vcpu); return EMULATE_DONE; + } if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); @@ -5931,17 +5987,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); - if (vcpu->cpu != smp_processor_id()) + if (vcpu->cpu != raw_smp_processor_id()) send_ipi = 1; } } - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* @@ -6079,12 +6135,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work) struct kvm_vcpu *vcpu; int i; - spin_lock(&kvm_lock); + mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); atomic_set(&kvm_guest_has_master_clock, 0); - spin_unlock(&kvm_lock); + mutex_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); @@ -7471,7 +7527,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); - if (!is_long_mode(vcpu) && is_pae(vcpu)) { + if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } @@ -8052,6 +8108,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); + INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); atomic_set(&kvm->arch.noncoherent_dma_count, 0); @@ -8080,6 +8137,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; } +int kvm_arch_post_init_vm(struct kvm *kvm) +{ + return kvm_mmu_post_init_vm(kvm); +} + static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { int r; @@ -8186,6 +8248,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) } EXPORT_SYMBOL_GPL(x86_set_memory_region); +void kvm_arch_pre_destroy_vm(struct kvm *kvm) +{ + kvm_mmu_pre_destroy_vm(kvm); +} + void kvm_arch_destroy_vm(struct kvm *kvm) { if (current->mm == kvm->mm) { diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 9758524ee99f79260b2feb1307a20af9957a13a4..71a3759a2d4e8d1fd35dec2d51364ad359e34300 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -112,8 +112,8 @@ static void delay_mwaitx(unsigned long __loops) __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); /* - * AMD, like Intel, supports the EAX hint and EAX=0xf - * means, do not enter any deep C-state and we use it + * AMD, like Intel's MWAIT version, supports the EAX hint and + * EAX=0xf0 means, do not enter any deep C-state and we use it * here in delay() to minimize wakeup latency. 
*/ __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index a0e85f2aff7d871bf443d202cb53a2a54b448efb..b6669d326545a28a306719ff534899a9f8cd8080 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -896,9 +896,6 @@ static void __init kexec_enter_virtual_mode(void) if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) runtime_code_page_mkexec(); - - /* clean DUMMY object */ - efi_delete_dummy_variable(); #endif } diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index a3d2c62fd805772c0c9d56fe57b80a9d058cf3ff..0a3ad5dd1e8bfb8f6d6f7192030af0ad90841f9f 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -68,7 +68,7 @@ BEGIN { lprefix1_expr = "\\((66|!F3)\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" + lprefix3_expr = "\\((F2|!F3|66&F2)\\)" lprefix_expr = "\\((66|F2|F3)\\)" max_lprefix = 4 @@ -256,7 +256,7 @@ function convert_operands(count,opnd, i,j,imm,mod) return add_flags(imm, mod) } -/^[0-9a-f]+\:/ { +/^[0-9a-f]+:/ { if (NR == 1) next # get index diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index a71d2739fa8288c39af870eb40fe921553080db6..9210b9cc4ec96393002d24c5a3bc816409821924 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -114,13 +114,6 @@ EXPORT_SYMBOL(__invalidate_icache_range); // FIXME EXPORT_SYMBOL(screen_info); #endif -EXPORT_SYMBOL(outsb); -EXPORT_SYMBOL(outsw); -EXPORT_SYMBOL(outsl); -EXPORT_SYMBOL(insb); -EXPORT_SYMBOL(insw); -EXPORT_SYMBOL(insl); - extern long common_exception_return; EXPORT_SYMBOL(common_exception_return); diff --git a/block/bio.c b/block/bio.c index 7c0f0d191eebcde5c9352eeee2ffef4c949dfd9a..1b7b9812ba7c8d51d28a513ca12bbf69ab34c214 100644 --- a/block/bio.c +++ b/block/bio.c @@ -865,6 +865,9 @@ int bio_add_page(struct bio *bio, struct page *page, bio->bi_vcnt++; done: bio->bi_iter.bi_size += len; + + if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page))) + bio_set_flag(bio, BIO_WORKINGSET); return len; } EXPORT_SYMBOL(bio_add_page); diff --git a/block/blk-core.c b/block/blk-core.c index faefb01cc95234451113b0f794766f7fef27a669..f05e9bbf2ddc0f1f64847272a7d1a5d0fa77fe30 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -33,6 +33,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2081,6 +2082,10 @@ EXPORT_SYMBOL(generic_make_request); */ blk_qc_t submit_bio(struct bio *bio) { + bool workingset_read = false; + unsigned long pflags; + blk_qc_t ret; + /* * If it's a regular read/write or a barrier with data attached, * go through the normal accounting stuff before submission. @@ -2096,6 +2101,8 @@ blk_qc_t submit_bio(struct bio *bio) if (op_is_write(bio_op(bio))) { count_vm_events(PGPGOUT, count); } else { + if (bio_flagged(bio, BIO_WORKINGSET)) + workingset_read = true; task_io_account_read(bio->bi_iter.bi_size); count_vm_events(PGPGIN, count); } @@ -2111,7 +2118,21 @@ blk_qc_t submit_bio(struct bio *bio) } } - return generic_make_request(bio); + /* + * If we're reading data that is part of the userspace + * workingset, count submission time as memory stall. When the + * device is congested, or the submitting cgroup IO-throttled, + * submission can be a significant part of overall IO time. 
+ */ + if (workingset_read) + psi_memstall_enter(&pflags); + + ret = generic_make_request(bio); + + if (workingset_read) + psi_memstall_leave(&pflags); + + return ret; } EXPORT_SYMBOL(submit_bio); diff --git a/build.config.aarch64 b/build.config.aarch64 new file mode 100644 index 0000000000000000000000000000000000000000..523bbc0449f75f17fcdcaf78f3c4b4fcb225e3a0 --- /dev/null +++ b/build.config.aarch64 @@ -0,0 +1,11 @@ +ARCH=arm64 + +CLANG_TRIPLE=aarch64-linux-gnu- +CROSS_COMPILE=aarch64-linux-androidkernel- +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin + +FILES=" +arch/arm64/boot/Image.gz +vmlinux +System.map +" diff --git a/build.config.common b/build.config.common new file mode 100644 index 0000000000000000000000000000000000000000..c9e16558801e0b21ec3440afb63345997472158c --- /dev/null +++ b/build.config.common @@ -0,0 +1,8 @@ +BRANCH=android-4.9-q +KERNEL_DIR=common + +CC=clang +CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r365631c/bin + +EXTRA_CMDS='' +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64 index 428186fff22442c3658754d184d05a2a38e56fec..0cb6019589729dbe4d8f9c78be25812bb5164728 100644 --- a/build.config.cuttlefish.aarch64 +++ b/build.config.cuttlefish.aarch64 @@ -1,16 +1,5 @@ -ARCH=arm64 -BRANCH=android-4.9 -CLANG_TRIPLE=aarch64-linux-gnu- -CROSS_COMPILE=aarch64-linux-androidkernel- +. ${ROOT_DIR}/common/build.config.common +. ${ROOT_DIR}/common/build.config.aarch64 + DEFCONFIG=cuttlefish_defconfig -EXTRA_CMDS='' -KERNEL_DIR=common POST_DEFCONFIG_CMDS="check_defconfig" -CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin -LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin -FILES=" -arch/arm64/boot/Image.gz -vmlinux -System.map -" -STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 index 7a79036c8cecab37906c5a497471dc7daff9c672..fed773ccc64aee16c7180bb37eaf564c2a68a583 100644 --- a/build.config.cuttlefish.x86_64 +++ b/build.config.cuttlefish.x86_64 @@ -1,16 +1,5 @@ -ARCH=x86_64 -BRANCH=android-4.9 -CLANG_TRIPLE=x86_64-linux-gnu- -CROSS_COMPILE=x86_64-linux-androidkernel- +. ${ROOT_DIR}/common/build.config.common +. 
${ROOT_DIR}/common/build.config.x86_64 + DEFCONFIG=x86_64_cuttlefish_defconfig -EXTRA_CMDS='' -KERNEL_DIR=common POST_DEFCONFIG_CMDS="check_defconfig" -CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin -LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin -FILES=" -arch/x86/boot/bzImage -vmlinux -System.map -" -STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.x86_64 b/build.config.x86_64 new file mode 100644 index 0000000000000000000000000000000000000000..df73a47e722084fbb8605695353b3a294f06a135 --- /dev/null +++ b/build.config.x86_64 @@ -0,0 +1,11 @@ +ARCH=x86_64 + +CLANG_TRIPLE=x86_64-linux-gnu- +CROSS_COMPILE=x86_64-linux-androidkernel- +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin + +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" diff --git a/certs/Makefile b/certs/Makefile index 2773c4afa24c0af0d8bfd73ab05a01606c292a7d..9d89b992eee623425ddc72b586628ec9500f0ca5 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -17,6 +17,10 @@ AFLAGS_system_certificates.o := -I$(srctree) quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2)) cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1) +ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYS),"verity.x509.pem") +SYSTEM_TRUSTED_KEYS_SRCPREFIX := $(srctree)/certs/ +endif + targets += x509_certificate_list $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) diff --git a/certs/verity.x509.pem b/certs/verity.x509.pem new file mode 100644 index 0000000000000000000000000000000000000000..86399c3c1dd7d5ae8dcee2c0b68b3f12a7237f28 --- /dev/null +++ b/certs/verity.x509.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g +VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE +AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe +Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET +MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G +A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p +ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi +6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o +fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3 +T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r +AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP +jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w +HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7 +oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix +NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6 +JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N +dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg +UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA +yttuAJAEAymk1mipd9+zp38= +-----END CERTIFICATE----- diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 7830d304dff6f75d9464857fbab0592872657d37..d58224d8086759ed7ba06467b09fd1dcd72b9d64 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -267,15 +267,6 @@ static int 
pkcs1pad_encrypt(struct akcipher_request *req) pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); - req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); - if (!req_ctx->out_buf) { - kfree(req_ctx->in_buf); - return -ENOMEM; - } - - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, - ctx->key_size, NULL); - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, pkcs1pad_encrypt_sign_complete_cb, req); diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 6b0d3ef7309cb710a63b38848ad35b770fed3c1a..2ccfbb61ca89987d67d68da320251ba291a6982b 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) if (node < 0) node = memory_add_physaddr_to_nid(info->start_addr); - result = add_memory(node, info->start_addr, info->length); + result = __add_memory(node, info->start_addr, info->length); /* * If the memory block has been used by the kernel, add_memory() diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 92fa47c6498cdf50d07319ef282fc4b2bf5f4ada..20fd17aaa9189e2e95a97a08e2240846d5b30923 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -247,6 +247,8 @@ acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj, u8 acpi_ns_locked); +u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); + /* * evsci - SCI (System Control Interrupt) handling/dispatch */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index dff1207a60788ad2a5b93b64ba1cc9d90945f52a..219bc576d1270f732d30f31fba107039844b9983 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -428,9 +428,9 @@ struct acpi_simple_repair_info { /* Info for running the _REG methods */ struct acpi_reg_walk_info { - acpi_adr_space_type space_id; u32 function; u32 reg_run_count; + acpi_adr_space_type space_id; }; /***************************************************************************** diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 4c6f795140402a3d74231882cb67561fe0c15115..9cb60fdc77e5061d3032eb46be40d08da9a7301b 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -677,6 +677,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, ACPI_FUNCTION_TRACE(ev_execute_reg_methods); + /* + * These address spaces do not need a call to _REG, since the ACPI + * specification defines them as: "must always be accessible". Since + * they never change state (never become unavailable), no need to ever + * call _REG on them. Also, a data_table is not a "real" address space, + * so do not call _REG. September 2018. 
+ */ + if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) || + (space_id == ACPI_ADR_SPACE_SYSTEM_IO) || + (space_id == ACPI_ADR_SPACE_DATA_TABLE)) { + return_VOID; + } + info.space_id = space_id; info.function = function; info.reg_run_count = 0; @@ -738,8 +751,8 @@ acpi_ev_reg_run(acpi_handle obj_handle, } /* - * We only care about regions.and objects that are allowed to have address - * space handlers + * We only care about regions and objects that are allowed to have + * address space handlers */ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { return (AE_OK); diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 75ddd160a716faaa10c81ac92ddca37d1a73dcbf..c8646c3977865c0a451a0c7bf4c26b3eefe6160c 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -50,9 +50,6 @@ #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evrgnini") -/* Local prototypes */ -static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); - /******************************************************************************* * * FUNCTION: acpi_ev_system_memory_region_setup @@ -67,7 +64,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node); * DESCRIPTION: Setup a system_memory operation region * ******************************************************************************/ - acpi_status acpi_ev_system_memory_region_setup(acpi_handle handle, u32 function, @@ -347,7 +343,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, * ******************************************************************************/ -static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) +u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) { acpi_status status; struct acpi_pnp_device_id *hid; diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index d2743067126aff25af1dd6038041ae5368c95634..f87d59a05c6816093574936fea46a81a9ca22925 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -227,7 +227,6 @@ acpi_remove_address_space_handler(acpi_handle device, */ region_obj = handler_obj->address_space.region_list; - } /* Remove this Handler object from the list */ diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index e0ea8f56d2bfd9ceb19fa63eda3544195e4c1e56..9ec4618df5338abd0c78aaf4c4a019f1bf57085c 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -360,8 +360,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) union acpi_object *psd = NULL; struct acpi_psd_package *pdomain; - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer, - ACPI_TYPE_PACKAGE); + status = acpi_evaluate_object_typed(handle, "_PSD", NULL, + &buffer, ACPI_TYPE_PACKAGE); + if (status == AE_NOT_FOUND) /* _PSD is optional */ + return 0; if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index c68e72414a67a9b00231b095335945d5cdd2f31e..435bd0ffc8c023a203e98f8eede528b2c5172061 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, if ((*ppos > max_size) || (*ppos + count > max_size) || (*ppos + count < count) || - (count > uncopied_bytes)) + (count > uncopied_bytes)) { + kfree(buf); return -EINVAL; + } if (copy_from_user(buf + (*ppos), user_buf, count)) { kfree(buf); @@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * 
user_buf, add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); } + kfree(buf); return count; } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 416953a4251094ce2066d724c1d88583b8e89705..b9fade7a3bcf434050d593f4cb468c30e04a9e7b 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1126,6 +1126,7 @@ void acpi_os_wait_events_complete(void) flush_workqueue(kacpid_wq); flush_workqueue(kacpi_notify_wq); } +EXPORT_SYMBOL(acpi_os_wait_events_complete); struct acpi_hp_work { struct work_struct work; diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index c576a6fe4ebb3044fc8f268fe338d3a50084cd6f..94ded9513c73b0bede043e85448251813b32806c 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. */ - if (!acpi_pci_irq_valid(dev, pin)) + if (!acpi_pci_irq_valid(dev, pin)) { + kfree(entry); return 0; + } if (acpi_isa_register_gsi(dev)) dev_warn(&dev->dev, "PCI INT %c: no GSI\n", diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index b66815f35be6b685a9e65beaffa0f7c30d10c48f..317ecc2e5757b88f43670dad1189e0b587a574b9 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -454,8 +454,9 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) decode_osc_support(root, "OS supports", support); status = acpi_pci_osc_support(root, support); if (ACPI_FAILURE(status)) { - dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n", - acpi_format_exception(status)); + dev_info(&device->dev, "_OSC failed (%s)%s\n", + acpi_format_exception(status), + pcie_aspm_support_enabled() ? "; disabling ASPM" : ""); *no_aspm = 1; return; } diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 7a3431018e0ab4ce96dc055abf31444e498b04bb..5008ead4609a46edafe73b9481b9912c71efc464 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc) hc->callback = NULL; hc->context = NULL; mutex_unlock(&hc->lock); + acpi_os_wait_events_complete(); return 0; } @@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device) hc = acpi_driver_data(device); acpi_ec_remove_query_handler(hc->ec, hc->query_bit); + acpi_os_wait_events_complete(); kfree(hc); device->driver_data = NULL; return 0; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 5d16fc4fa46c7855a835434112c6801bc2587098..a8d4f4b5a77eba0fd715496f3f6f3bb43642adb9 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -100,7 +100,8 @@ config SATA_AHCI_PLATFORM config AHCI_BRCM tristate "Broadcom AHCI SATA support" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP + depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \ + ARCH_BCM_63XX help This option enables support for the AHCI SATA3 controller found on Broadcom SoC's. 
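Two of the ACPI fixes above (custom_method.c and pci_irq.c) are the same class of bug: an early-return path that runs after an allocation must release that allocation before bailing out, otherwise every rejected request leaks memory. A minimal sketch of the pattern, assuming a kernel context with <linux/slab.h> and <linux/errno.h>; all names are hypothetical:

	#include <linux/errno.h>
	#include <linux/slab.h>

	static int handle_request(const void *src, size_t count, size_t max_size)
	{
		void *buf = kmalloc(max_size, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		if (count > max_size) {
			kfree(buf);	/* reject the request, but do not leak the buffer */
			return -EINVAL;
		}

		/* ... copy and consume src here ... */
		kfree(buf);
		return 0;
	}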
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a3d60ccafd9aa32d4e7dee42a65c746940365f8e..f5eb102a2cf7669dc9a2014341f4bdaff255cdbe 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1734,6 +1734,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) return 1; } +static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks) +{ + struct request *rq = scmd->request; + u32 req_blocks; + + if (!blk_rq_is_passthrough(rq)) + return true; + + req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; + if (n_blocks > req_blocks) + return false; + + return true; +} + /** * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one * @qc: Storage for translated ATA taskfile @@ -1776,6 +1791,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_10_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_6: case WRITE_6: @@ -1790,6 +1807,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) */ if (!n_block) n_block = 256; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; case READ_16: case WRITE_16: @@ -1800,6 +1819,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) scsi_16_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; + if (!ata_check_nblocks(scmd, n_block)) + goto invalid_fld; break; default: DPRINTK("no-byte command\n"); diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index bd6b089c67a3aa220fa2aaf3803f8eb7737b2d4d..634c814cbeda405beb61749641087cf9490550ca 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) * start of new transfer. 
*/ drv_data->dma_rx_data.port = EP93XX_DMA_IDE; - drv_data->dma_rx_data.direction = DMA_FROM_DEVICE; + drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM; drv_data->dma_rx_data.name = "ep93xx-pata-rx"; drv_data->dma_rx_channel = dma_request_channel(mask, ep93xx_pata_dma_filter, &drv_data->dma_rx_data); @@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) return; drv_data->dma_tx_data.port = EP93XX_DMA_IDE; - drv_data->dma_tx_data.direction = DMA_TO_DEVICE; + drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV; drv_data->dma_tx_data.name = "ep93xx-pata-tx"; drv_data->dma_tx_channel = dma_request_channel(mask, ep93xx_pata_dma_filter, &drv_data->dma_tx_data); @@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) /* Configure receive channel direction and source address */ memset(&conf, 0, sizeof(conf)); - conf.direction = DMA_FROM_DEVICE; + conf.direction = DMA_DEV_TO_MEM; conf.src_addr = drv_data->udma_in_phys; conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) { @@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) /* Configure transmit channel direction and destination address */ memset(&conf, 0, sizeof(conf)); - conf.direction = DMA_TO_DEVICE; + conf.direction = DMA_MEM_TO_DEV; conf.dst_addr = drv_data->udma_out_phys; conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) { diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 31c60101a69a42ddef0f58bab569c2e052cb1fbe..7fa840170151bd7e9439b5abf4a2db53a4576e13 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -199,7 +199,7 @@ config ATM_NICSTAR_USE_SUNI make the card work). config ATM_NICSTAR_USE_IDT77105 - bool "Use IDT77015 PHY driver (25Mbps)" + bool "Use IDT77105 PHY driver (25Mbps)" depends on ATM_NICSTAR help Support for the PHYsical layer chip in ForeRunner LE25 cards. 
In diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a0b88f1489905873955f1e1d3a1bc8f9eccc9df8..e23e2672a1d6b92c2ce0d86525653006f7c565f8 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0}; #define zin_n(r) inl(zatm_dev->base+r*4) #define zin(r) inl(zatm_dev->base+uPD98401_##r*4) #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) -#define zwait while (zin(CMR) & uPD98401_BUSY) +#define zwait() do {} while (zin(CMR) & uPD98401_BUSY) /* RX0, RX1, TX0, TX1 */ static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; @@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) { - zwait; + zwait(); zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); @@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) { - zwait; + zwait(); zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait; + zwait(); return zin(CER); } @@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool) } if (first) { spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(virt_to_bus(first),CER); zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, CMR); @@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc) } if (zatm_vcc->pool < 0) return -EMSGSIZE; spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_OPEN_CHAN,CMR); - zwait; + zwait(); DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc) pos = vcc->vci >> 1; shift = (1-(vcc->vci & 1)) << 4; zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); - zwait; + zwait(); zout(uPD98401_NOP,CMR); - zwait; + zwait(); zout(uPD98401_NOP,CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); } spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); udelay(10); /* why oh why ... ? 
*/ zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " "%d\n",vcc->dev->number,zatm_vcc->rx_chan); @@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n"); skb_queue_tail(&zatm_vcc->tx_queue,skb); DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ uPD98401_TXVC_QRP)); - zwait; + zwait(); zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << uPD98401_CHAN_ADDR_SHIFT),CMR); spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc) } spin_lock_irqsave(&zatm_dev->lock, flags); #if 0 - zwait; + zwait(); zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); #endif - zwait; + zwait(); zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait; + zwait(); if (!(zin(CMR) & uPD98401_CHAN_ADDR)) printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " "%d\n",vcc->dev->number,chan); @@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc) zatm_vcc->tx_chan = 0; if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; spin_lock_irqsave(&zatm_dev->lock, flags); - zwait; + zwait(); zout(uPD98401_OPEN_CHAN,CMR); - zwait; + zwait(); DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; spin_unlock_irqrestore(&zatm_dev->lock, flags); @@ -1559,7 +1559,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value, struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); - zwait; + zwait(); zout(value,CER); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); @@ -1571,10 +1571,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) struct zatm_dev *zatm_dev; zatm_dev = ZATM_DEV(dev); - zwait; + zwait(); zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait; + zwait(); return zin(CER) & 0xff; } diff --git a/drivers/base/component.c b/drivers/base/component.c index 89b032f2ffd222ed65ec771634fe1e694b916bbb..08da6160e94dd3521236dfdf567ac5a1ba61edc3 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -461,9 +461,9 @@ int component_bind_all(struct device *master_dev, void *data) } if (ret != 0) { - for (; i--; ) - if (!master->match->compare[i].duplicate) { - c = master->match->compare[i].component; + for (; i > 0; i--) + if (!master->match->compare[i - 1].duplicate) { + c = master->match->compare[i - 1].component; component_unbind(c, master, data); } } diff --git a/drivers/base/core.c b/drivers/base/core.c index ceb4c24ba49130539978f2569b4d52480795ba1f..ee783abc1c9650bf6cd0f17329fdc33edd91706f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -10,6 +10,7 @@ * */ +#include #include #include #include @@ -2082,6 +2083,8 @@ void device_shutdown(void) wait_for_device_probe(); device_block_probing(); + cpufreq_suspend(); + spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. 
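In the component.c hunk above, the unwind path taken when a later bind fails walks the already-bound entries in reverse, now indexing compare[i - 1] under an explicit i > 0 test instead of relying on a post-decrement in the loop condition. The general shape of that rollback idiom, with hypothetical types and helpers:

	struct item;
	int bind_one(struct item *it);		/* hypothetical */
	void unbind_one(struct item *it);	/* hypothetical */

	static int bind_all(struct item **items, int n)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			ret = bind_one(items[i]);
			if (ret)
				break;
		}

		if (ret) {
			/* Roll back everything that bound successfully, newest first. */
			for (; i > 0; i--)
				unbind_one(items[i - 1]);
		}

		return ret;
	}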
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index f000f12c87c58ad91cfe06569e516518c75c9ddb..7fc135399140a5a35842a46476856e7d33e8fda4 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -628,12 +628,27 @@ ssize_t __weak cpu_show_mds(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); +static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); +static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -642,6 +657,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_spec_store_bypass.attr, &dev_attr_l1tf.attr, &dev_attr_mds.attr, + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, NULL }; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c5cdd190b7816ce446d6d7b06235c5f995c4c3f2..6a3694a4843f84963a342beb8520c7cf1caafca9 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -500,15 +500,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) return -EINVAL; + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + nid = memory_add_physaddr_to_nid(phys_addr); - ret = add_memory(nid, phys_addr, - MIN_MEMORY_BLOCK_SIZE * sections_per_block); + ret = __add_memory(nid, phys_addr, + MIN_MEMORY_BLOCK_SIZE * sections_per_block); if (ret) goto out; ret = count; out: + unlock_device_hotplug(); return ret; } diff --git a/drivers/base/soc.c b/drivers/base/soc.c index b63f23e6ad61b647ff26eecaeb4685e55fd67ad0..ddb32c890fa6afe44e7c5e1dd9aca6e456141cc5 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -145,6 +145,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr out1: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(soc_device_register); /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. 
*/ void soc_device_unregister(struct soc_device *soc_dev) @@ -153,6 +154,7 @@ void soc_device_unregister(struct soc_device *soc_dev) device_unregister(&soc_dev->dev); } +EXPORT_SYMBOL_GPL(soc_device_unregister); static int __init soc_bus_register(void) { diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 5fd50a2841682b5a2479cfd210e3ab1a38a9d8fd..db4354fb2a0d3a9fc9d1bead51bd0cc46082c8b8 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1699,11 +1699,41 @@ static const struct block_device_operations floppy_fops = { .check_events = amiga_check_events, }; +static struct gendisk *fd_alloc_disk(int drive) +{ + struct gendisk *disk; + + disk = alloc_disk(1); + if (!disk) + goto out; + + disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); + if (IS_ERR(disk->queue)) { + disk->queue = NULL; + goto out_put_disk; + } + + unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL); + if (!unit[drive].trackbuf) + goto out_cleanup_queue; + + return disk; + +out_cleanup_queue: + blk_cleanup_queue(disk->queue); + disk->queue = NULL; +out_put_disk: + put_disk(disk); +out: + unit[drive].type->code = FD_NODRIVE; + return NULL; +} + static int __init fd_probe_drives(void) { int drive,drives,nomem; - printk(KERN_INFO "FD: probing units\nfound "); + pr_info("FD: probing units\nfound"); drives=0; nomem=0; for(drive=0;drivecode == FD_NODRIVE) continue; - disk = alloc_disk(1); + + disk = fd_alloc_disk(drive); if (!disk) { - unit[drive].type->code = FD_NODRIVE; + pr_cont(" no mem for fd%d", drive); + nomem = 1; continue; } unit[drive].gendisk = disk; - - disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); - if (!disk->queue) { - unit[drive].type->code = FD_NODRIVE; - continue; - } - drives++; - if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { - printk("no mem for "); - unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */ - drives--; - nomem = 1; - } - printk("fd%d ",drive); + + pr_cont(" fd%d",drive); disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; @@ -1742,11 +1762,11 @@ static int __init fd_probe_drives(void) } if ((drives > 0) || (nomem == 0)) { if (drives == 0) - printk("no drives"); - printk("\n"); + pr_cont(" no drives"); + pr_cont("\n"); return drives; } - printk("\n"); + pr_cont("\n"); return -ENOMEM; } @@ -1837,30 +1857,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev) return ret; } -#if 0 /* not safe to unload */ -static int __exit amiga_floppy_remove(struct platform_device *pdev) -{ - int i; - - for( i = 0; i < FD_MAX_UNITS; i++) { - if (unit[i].type->code != FD_NODRIVE) { - struct request_queue *q = unit[i].gendisk->queue; - del_gendisk(unit[i].gendisk); - put_disk(unit[i].gendisk); - kfree(unit[i].trackbuf); - if (q) - blk_cleanup_queue(q); - } - } - blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); - free_irq(IRQ_AMIGA_CIAA_TB, NULL); - free_irq(IRQ_AMIGA_DSKBLK, NULL); - custom.dmacon = DMAF_DISK; /* disable DMA */ - amiga_chip_free(raw_buf); - unregister_blkdev(FLOPPY_MAJOR, "fd"); -} -#endif - static struct platform_driver amiga_floppy_driver = { .driver = { .name = "amiga-floppy", diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6930abef42b31010a9ebbf2a54ded2f71f03a0be..ece4f706b38fb89732bcdf072c9266952f93cfca 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3784,7 +3784,7 @@ static int compat_getdrvprm(int drive, v.native_format = UDP->native_format; mutex_unlock(&floppy_mutex); - if 
(copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params))) + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params))) return -EFAULT; return 0; } @@ -3820,7 +3820,7 @@ static int compat_getdrvstat(int drive, bool poll, v.bufblocks = UDRS->bufblocks; mutex_unlock(&floppy_mutex); - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) return -EFAULT; return 0; Eintr: diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b3e432a8a5670e70a9feb973e84d563e6d8fc509..00574470837faadb45e9521594aaebc1137a678e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1602,6 +1602,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, arg = (unsigned long) compat_ptr(arg); case LOOP_SET_FD: case LOOP_CHANGE_FD: + case LOOP_SET_DIRECT_IO: case LOOP_SET_BLOCK_SIZE: err = lo_ioctl(bdev, mode, cmd, arg); break; diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index a803e72ffd507c22b0f9ec6753d598cf202c9456..b3ec52c7099e25a2461c90a78ea2bea952f9f661 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -509,7 +509,7 @@ static int bt_dt_parse_vreg_info(struct device *dev, static int bt_dt_parse_clk_info(struct device *dev, struct bt_power_clk_data **clk_data) { - int ret = -EINVAL; + int ret = 0; struct bt_power_clk_data *clk = NULL; struct device_node *np = dev->of_node; @@ -546,7 +546,7 @@ static int bt_dt_parse_clk_info(struct device *dev, *clk_data = clk; } else { - BT_PWR_ERR("clocks is not provided in device tree"); + BT_PWR_INFO("clocks is not provided in device tree"); } err: @@ -567,7 +567,7 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev) of_get_named_gpio(pdev->dev.of_node, "qca,bt-reset-gpio", 0); if (bt_power_pdata->bt_gpio_sys_rst < 0) - BT_PWR_ERR("bt-reset-gpio not provided in device tree"); + BT_PWR_INFO("bt-reset-gpio not provided in devicetree"); rc = bt_dt_parse_vreg_info(&pdev->dev, &bt_power_pdata->bt_vdd_core, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8dce1a8900788de6869ac9e2a4296bb5811c421e..1d1c0d7aec885a1b90bb6c56ae01677c84f72338 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -362,6 +362,9 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8822BE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index 34e04bf87a620a995a0dfaad52f4846519913dcf..26f9982bab263b47d9afbd63997e6b86248ccad3 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -605,6 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else @@ -620,6 +621,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; @@ -644,6 +646,7 @@ static int bcsp_recv(struct 
hci_uart *hu, const void *data, int count) bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index a2f6953a86f5e4e01f29e9b9a6f405d804da3ec0..0a21fb86fd67d9322b73aa52c04a31ff500f0e29 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -653,15 +653,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) return err; hu->proto = p; - set_bit(HCI_UART_PROTO_READY, &hu->flags); err = hci_uart_register_dev(hu); if (err) { - clear_bit(HCI_UART_PROTO_READY, &hu->flags); p->close(hu); return err; } + set_bit(HCI_UART_PROTO_READY, &hu->flags); return 0; } diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c index ccde51335022b9bdc06f5d23938a9bd79a9c6330..58bbcfc67ef64cfb9c86f1270f259e6c0949c02f 100644 --- a/drivers/bus/mhi/core/mhi_boot.c +++ b/drivers/bus/mhi/core/mhi_boot.c @@ -62,7 +62,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, sequence_id); - MHI_LOG("address:%pad len:0x%lx sequence:%u\n", + MHI_LOG("address:%pad len:0x%zx sequence:%u\n", &mhi_buf->dma_addr, mhi_buf->len, sequence_id); } @@ -212,7 +212,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, mhi_cntrl->sequence_id); read_unlock_bh(pm_lock); - MHI_LOG("Upper:0x%x Lower:0x%x len:0x%lx sequence:%u\n", + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", upper_32_bits(mhi_buf->dma_addr), lower_32_bits(mhi_buf->dma_addr), mhi_buf->len, mhi_cntrl->sequence_id); @@ -361,8 +361,8 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, if (!mhi_buf->buf) goto error_alloc_segment; - MHI_LOG("Entry:%d Address:0x%llx size:%lu\n", i, - mhi_buf->dma_addr, mhi_buf->len); + MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i, + (u64)mhi_buf->dma_addr, mhi_buf->len); } img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index df725ae6be8945248b8bc04864d802005912ccad..53a9e5ca0e1d51085d194e2e7688a99a292201bd 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -23,6 +23,8 @@ #include #include "mhi_internal.h" +static struct dentry *mhi_parent; + const char * const mhi_ee_str[MHI_EE_MAX] = { [MHI_EE_PBL] = "PBL", [MHI_EE_SBL] = "SBL", @@ -1388,7 +1390,8 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) mhi_cntrl->mhi_dev = mhi_dev; - mhi_cntrl->parent = debugfs_lookup(mhi_bus_type.name, NULL); + mhi_cntrl->parent = mhi_parent; + mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; /* adding it to this list only for debug purpose */ @@ -1780,7 +1783,7 @@ static int __init mhi_init(void) INIT_LIST_HEAD(&mhi_bus.controller_list); /* parent directory */ - debugfs_create_dir(mhi_bus_type.name, NULL); + mhi_parent = debugfs_create_dir(mhi_bus_type.name, NULL); ret = bus_register(&mhi_bus_type); diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h index 106f3afefd135b5933fc3cb49617d5919c875750..d0bc74b5af94c3f626b38c134969b373cf56c18d 100644 --- a/drivers/bus/mhi/core/mhi_internal.h +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -517,10 +517,10 @@ enum MHI_XFER_TYPE { #define MHI_DEV_WAKE_DB (127) #define MHI_MAX_MTU (0xffff) -#define MHI_BW_SCALE_SETUP(er_index) ((MHI_BW_SCALE_CHAN_DB << \ - BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK | \ - (1 << BW_SCALE_CFG_ENABLED_SHIFT) 
& BW_SCALE_CFG_ENABLED_MASK | \ - ((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK) +#define MHI_BW_SCALE_SETUP(er_index) (((MHI_BW_SCALE_CHAN_DB << \ + BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \ + ((1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \ + (((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK)) #define MHI_BW_SCALE_RESULT(status, seq) ((status & 0xF) << 8 | (seq & 0xFF)) #define MHI_BW_SCALE_NACK 0xF diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 91e87f07989b78bd35612232276039d83988c883..7404b8e8cfb686447353a6b382401c4b403c7e9b 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "mhi_internal.h" static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, @@ -2022,8 +2023,8 @@ int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) seq_printf(m, " rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n", er_ctxt->rp, er_ctxt->wp, - mhi_to_physical(ring, ring->rp), - mhi_event->db_cfg.db_val); + (u64)mhi_to_physical(ring, ring->rp), + (u64)mhi_event->db_cfg.db_val); } } @@ -2056,9 +2057,9 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->wp, - mhi_to_physical(ring, ring->rp), - mhi_to_physical(ring, ring->wp), - mhi_chan->db_cfg.db_val); + (u64)mhi_to_physical(ring, ring->rp), + (u64)mhi_to_physical(ring, ring->wp), + (u64)mhi_chan->db_cfg.db_val); } } diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c index e5d7f0856a444c3e22d2e428f43ab31a8962b273..1a8535af4f2d91897f220e8d81a0ad013c7a6584 100644 --- a/drivers/bus/mhi/devices/mhi_uci.c +++ b/drivers/bus/mhi/devices/mhi_uci.c @@ -136,7 +136,7 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev) uci_buf = buf + mtu; uci_buf->data = buf; - MSG_VERB("Allocated buf %d of %d size %ld\n", i, nr_trbs, mtu); + MSG_VERB("Allocated buf %d of %d size %zx\n", i, nr_trbs, mtu); ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, MHI_EOT); @@ -281,7 +281,7 @@ static ssize_t mhi_uci_write(struct file *file, return -ERESTARTSYS; } - MSG_VERB("Enter: to xfer:%lu bytes\n", count); + MSG_VERB("Enter: to xfer:%zx bytes\n", count); while (count) { size_t xfer_size; @@ -304,7 +304,7 @@ static ssize_t mhi_uci_write(struct file *file, xfer_size = min_t(size_t, count, uci_dev->mtu); kbuf = kmalloc(xfer_size, GFP_KERNEL); if (!kbuf) { - MSG_ERR("Failed to allocate memory %lu\n", xfer_size); + MSG_ERR("Failed to allocate memory %zx\n", xfer_size); return -ENOMEM; } @@ -339,7 +339,7 @@ static ssize_t mhi_uci_write(struct file *file, } spin_unlock_bh(&uci_chan->lock); - MSG_VERB("Exit: Number of bytes xferred:%lu\n", bytes_xfered); + MSG_VERB("Exit: Number of bytes xferred:%zx\n", bytes_xfered); return bytes_xfered; @@ -365,7 +365,7 @@ static ssize_t mhi_uci_read(struct file *file, if (!buf) return -EINVAL; - MSG_VERB("Client provided buf len:%lu\n", count); + MSG_VERB("Client provided buf len:%zx\n", count); /* confirm channel is active */ spin_lock_bh(&uci_chan->lock); @@ -420,7 +420,7 @@ static ssize_t mhi_uci_read(struct file *file, if (ret) return ret; - MSG_VERB("Copied %lu of %lu bytes\n", to_copy, uci_chan->rx_size); + MSG_VERB("Copied %zx of %zx bytes\n", to_copy, uci_chan->rx_size); uci_chan->rx_size -= to_copy; /* we finished with this buffer, queue it back to 
hardware */ @@ -444,7 +444,7 @@ static ssize_t mhi_uci_read(struct file *file, spin_unlock_bh(&uci_chan->lock); } - MSG_VERB("Returning %lu bytes\n", to_copy); + MSG_VERB("Returning %zx bytes\n", to_copy); return to_copy; diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 676ffe8072c9768a8f543f5212ef744844e6db04..9033c23625ac6dcac6dc4da3993f9f5c6f4aa64f 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2722,6 +2722,7 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl, VERIFY(err, (fl && ud)); if (err) goto bail; + mutex_lock(&fl->map_mutex); mutex_lock(&fl->fl_map_mutex); if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) { pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n", @@ -2729,11 +2730,13 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl, (unsigned int)ud->len); err = -1; mutex_unlock(&fl->fl_map_mutex); + mutex_unlock(&fl->map_mutex); goto bail; } if (map) fastrpc_mmap_free(map, 0); mutex_unlock(&fl->fl_map_mutex); + mutex_unlock(&fl->map_mutex); bail: return err; } diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index d8bda0da172fdeda28e09e6fdd06f6224b664fb2..548bd58d44b7f3f4fb70b79bbc592f6a640a9988 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -1878,7 +1878,8 @@ static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header, DIAG_MAX_REQ_SIZE; write_len += sizeof(uint32_t); } else if (ss_cmd_code == DIAG_DIAG_STM) { - write_len = diag_process_stm_cmd(req_buf, payload_ptr); + write_len = diag_process_stm_cmd(req_buf, req_len, + payload_ptr); } } else if (subsys_id == DIAG_SS_PARAMS) { if (ss_cmd_code == DIAG_DIAG_POLL) { diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index f182a099a0808b935e7b433de5228292b79662fe..fcd2f355b4e774ec7a77c51c1c6b1cb348911c9d 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -591,7 +591,7 @@ void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type) } } -int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf) +int diag_process_stm_cmd(unsigned char *buf, int len, unsigned char *dest_buf) { uint8_t version, mask, cmd; uint8_t rsp_supported = 0; @@ -603,7 +603,11 @@ int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf) buf, dest_buf, __func__); return -EIO; } - + if (len < STM_CMD_NUM_BYTES) { + pr_err("diag: Invalid buffer length: %d in %s\n", len, + __func__); + return -EINVAL; + } version = *(buf + STM_CMD_VERSION_OFFSET); mask = *(buf + STM_CMD_MASK_OFFSET); cmd = *(buf + STM_CMD_DATA_OFFSET); @@ -1118,12 +1122,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid) } else if ((len >= ((2 * sizeof(uint8_t)) + sizeof(uint16_t))) && (*buf == 0x4b) && (*(buf+1) == 0x12) && (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) { - len = diag_process_stm_cmd(buf, driver->apps_rsp_buf); - if (len > 0) { - diag_send_rsp(driver->apps_rsp_buf, len, pid); + write_len = diag_process_stm_cmd(buf, len, + driver->apps_rsp_buf); + if (write_len > 0) { + diag_send_rsp(driver->apps_rsp_buf, write_len, pid); return 0; } - return len; + return write_len; } /* Check for time sync query command */ else if ((len >= ((2 * 
sizeof(uint8_t)) + sizeof(uint16_t))) && diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h index 687aeb78f30119e7c1c2274fd349d5be319f7811..066d1f9f2d44a06ccfb229322f1b1811cd4d50c5 100644 --- a/drivers/char/diag/diagfwd.h +++ b/drivers/char/diag/diagfwd.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2017, 2019 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,7 +46,7 @@ void diag_update_sleeping_process(int process_id, int data_type); int diag_process_apps_pkt(unsigned char *buf, int len, int pid); void diag_send_error_rsp(unsigned char *buf, int len, int pid); void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type); -int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf); +int diag_process_stm_cmd(unsigned char *buf, int len, unsigned char *dest_buf); void diag_md_hdlc_reset_timer_func(unsigned long pid); void diag_update_md_clients(unsigned int type); #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index d2d2c89de5b4428e627eb06d9733ec1bcf2c2b0d..5e79b4bfe27a936e2f00bc9b031d175ea998471a 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -88,7 +88,7 @@ static void add_early_randomness(struct hwrng *rng) size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_buffer, size, 1); + bytes_read = rng_get_data(rng, rng_buffer, size, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) add_device_randomness(rng_buffer, bytes_read); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 89adeb4905f1c16307c89fda25f1c315eeae44c6..eda67a6fa293a2eeae693273a6b34ab455e1ee61 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -283,6 +283,9 @@ struct smi_info { */ bool irq_enable_broken; + /* Is the driver in maintenance mode? */ + bool in_maintenance_mode; + /* * Did we get an attention that we did not handle? */ @@ -1093,11 +1096,20 @@ static int ipmi_thread(void *data) spin_unlock_irqrestore(&(smi_info->si_lock), flags); busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, &busy_until); - if (smi_result == SI_SM_CALL_WITHOUT_DELAY) + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { ; /* do nothing */ - else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) - schedule(); - else if (smi_result == SI_SM_IDLE) { + } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) { + /* + * In maintenance mode we run as fast as + * possible to allow firmware updates to + * complete as fast as possible, but normally + * don't bang on the scheduler. 
+ */ + if (smi_info->in_maintenance_mode) + schedule(); + else + usleep_range(100, 200); + } else if (smi_result == SI_SM_IDLE) { if (atomic_read(&smi_info->need_watch)) { schedule_timeout_interruptible(100); } else { @@ -1105,8 +1117,9 @@ static int ipmi_thread(void *data) __set_current_state(TASK_INTERRUPTIBLE); schedule(); } - } else + } else { schedule_timeout_interruptible(1); + } } return 0; } @@ -1285,6 +1298,7 @@ static void set_maintenance_mode(void *send_info, bool enable) if (!enable) atomic_set(&smi_info->req_events, 0); + smi_info->in_maintenance_mode = enable; } static const struct ipmi_smi_handlers handlers = { diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 593a8818aca99e345e03d0fb56eabd25b5ffd05a..e87a40c198fa6a1f3de1ab9642459b5d9f8b4cb4 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -96,6 +96,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) } #endif +static inline bool should_stop_iteration(void) +{ + if (need_resched()) + cond_resched(); + return fatal_signal_pending(current); +} + /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. @@ -162,6 +169,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, p += sz; count -= sz; read += sz; + if (should_stop_iteration()) + break; } *ppos += read; @@ -233,6 +242,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -446,6 +457,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf, read += sz; low_count -= sz; count -= sz; + if (should_stop_iteration()) { + count = 0; + break; + } } } @@ -470,6 +485,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, buf += sz; read += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } @@ -522,6 +539,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -573,6 +592,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, buf += sz; virtr += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 800ced0a5a2475f9d7bffb32001195e606eb9bd3..34548d3b4d13cf4801e4aa656bc295868310469c 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void) } } -static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size, int pages) { struct port_buffer *buf; @@ -445,7 +445,7 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, return buf; } - if (is_rproc_serial(vq->vdev)) { + if (is_rproc_serial(vdev)) { /* * Allocate DMA memory from ancestor. 
When a virtio * device is created by remoteproc, the DMA memory is @@ -455,9 +455,9 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, * DMA_MEMORY_INCLUDES_CHILDREN had been supported * in dma-coherent.c */ - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) + if (!vdev->dev.parent || !vdev->dev.parent->parent) goto free_buf; - buf->dev = vq->vdev->dev.parent->parent; + buf->dev = vdev->dev.parent->parent; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); @@ -841,7 +841,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, count = min((size_t)(32 * 1024), count); - buf = alloc_buf(port->out_vq, count, 0); + buf = alloc_buf(port->portdev->vdev, count, 0); if (!buf) return -ENOMEM; @@ -960,7 +960,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (ret < 0) goto error_out; - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs); if (!buf) { ret = -ENOMEM; goto error_out; @@ -1369,24 +1369,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols) port->cons.ws.ws_col = cols; } -static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) +static int fill_queue(struct virtqueue *vq, spinlock_t *lock) { struct port_buffer *buf; - unsigned int nr_added_bufs; + int nr_added_bufs; int ret; nr_added_bufs = 0; do { - buf = alloc_buf(vq, PAGE_SIZE, 0); + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) - break; + return -ENOMEM; spin_lock_irq(lock); ret = add_inbuf(vq, buf); if (ret < 0) { spin_unlock_irq(lock); free_buf(buf, true); - break; + return ret; } nr_added_bufs++; spin_unlock_irq(lock); @@ -1406,7 +1406,6 @@ static int add_port(struct ports_device *portdev, u32 id) char debugfs_name[16]; struct port *port; dev_t devt; - unsigned int nr_added_bufs; int err; port = kmalloc(sizeof(*port), GFP_KERNEL); @@ -1465,11 +1464,13 @@ static int add_port(struct ports_device *portdev, u32 id) spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); - /* Fill the in_vq with buffers so the host can send us data. */ - nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); - if (!nr_added_bufs) { + /* We can safely ignore ENOSPC because it means + * the queue already has buffers. 
Buffers are removed + * only by virtcons_remove(), not by unplug_port() + */ + err = fill_queue(port->in_vq, &port->inbuf_lock); + if (err < 0 && err != -ENOSPC) { dev_err(port->dev, "Error allocating inbufs\n"); - err = -ENOMEM; goto free_device; } @@ -1992,19 +1993,40 @@ static void remove_vqs(struct ports_device *portdev) kfree(portdev->out_vqs); } -static void remove_controlq_data(struct ports_device *portdev) +static void virtcons_remove(struct virtio_device *vdev) { - struct port_buffer *buf; - unsigned int len; + struct ports_device *portdev; + struct port *port, *port2; - if (!use_multiport(portdev)) - return; + portdev = vdev->priv; - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf, true); + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf, true); + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + remove_vqs(portdev); + kfree(portdev); } /* @@ -2073,6 +2095,7 @@ static int virtcons_probe(struct virtio_device *vdev) spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); virtio_device_ready(portdev->vdev); @@ -2080,18 +2103,22 @@ static int virtcons_probe(struct virtio_device *vdev) INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { - unsigned int nr_added_bufs; - spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); - nr_added_bufs = fill_queue(portdev->c_ivq, - &portdev->c_ivq_lock); - if (!nr_added_bufs) { + err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + if (err < 0) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); - err = -ENOMEM; - goto free_vqs; + /* + * The host might want to notify mgmt sw about device + * add failure. + */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. 
*/ + virtcons_remove(vdev); + return err; } } else { /* @@ -2122,11 +2149,6 @@ static int virtcons_probe(struct virtio_device *vdev) return 0; -free_vqs: - /* The host might want to notify mgmt sw about device add failure */ - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 0); - remove_vqs(portdev); free_chrdev: unregister_chrdev(portdev->chr_major, "virtio-portsdev"); free: @@ -2135,43 +2157,6 @@ static int virtcons_probe(struct virtio_device *vdev) return err; } -static void virtcons_remove(struct virtio_device *vdev) -{ - struct ports_device *portdev; - struct port *port, *port2; - - portdev = vdev->priv; - - spin_lock_irq(&pdrvdata_lock); - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ - if (use_multiport(portdev)) - cancel_work_sync(&portdev->control_work); - else - cancel_work_sync(&portdev->config_work); - - list_for_each_entry_safe(port, port2, &portdev->ports, list) - unplug_port(port); - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - - /* - * When yanking out a device, we immediately lose the - * (device-side) queues. So there's no point in keeping the - * guest side around till we drop our final reference. This - * also means that any ports which are in an open state will - * have to just stop using the port, as the vqs are going - * away. - */ - remove_controlq_data(portdev); - remove_vqs(portdev); - kfree(portdev); -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -2202,15 +2187,16 @@ static int virtcons_freeze(struct virtio_device *vdev) vdev->config->reset(vdev); - virtqueue_disable_cb(portdev->c_ivq); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step. */ - virtqueue_disable_cb(portdev->c_ivq); - remove_controlq_data(portdev); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); list_for_each_entry(port, &portdev->ports, list) { virtqueue_disable_cb(port->in_vq); diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index c813c27f2e58c6f8e62c523a1a9a8b1d212a96d8..2f97a843d6d6bcbd7a6fe7f90849dabfbbe54db6 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -27,6 +27,10 @@ #define MOR_KEY_MASK (0xff << 16) +#define clk_main_parent_select(s) (((s) & \ + (AT91_PMC_MOSCEN | \ + AT91_PMC_OSCBYPASS)) ? 1 : 0) + struct clk_main_osc { struct clk_hw hw; struct regmap *regmap; @@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw) regmap_read(regmap, AT91_PMC_SR, &status); - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN); + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp); } static const struct clk_ops main_osc_ops = { @@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - return status & AT91_PMC_MOSCEN ? 1 : 0; + return clk_main_parent_select(status); } static const struct clk_ops sam9x5_main_ops = { @@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, clkmain->hw.init = &init; clkmain->regmap = regmap; regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - clkmain->parent = status & AT91_PMC_MOSCEN ? 
1 : 0; + clkmain->parent = clk_main_parent_select(status); hw = &clkmain->hw; ret = clk_hw_register(NULL, &clkmain->hw); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 80ae2a51452d7410c1edc856f8126d6451c75913..cdce49f6476aaa8516bef9d371b75158d0342c05 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -540,7 +540,7 @@ static const struct clockgen_chipinfo chipinfo[] = { .guts_compat = "fsl,qoriq-device-config-1.0", .init_periph = p5020_init_periph, .cmux_groups = { - &p2041_cmux_grp1, &p2041_cmux_grp2 + &p5020_cmux_grp1, &p5020_cmux_grp2 }, .cmux_to_group = { 0, 1, -1 diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 9adaf48aea2317625a520bb42635fa8362946893..061a9f10218b3f9da83cb220b67f2bc61db00a8f 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { /* The gate clocks has mux parent. */ {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index fe7d9ed1d4364eba777e6104bda92504e9559aff..b0a18bc1a27fdd773e7384ac70b4be03145a1f8c 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -59,10 +59,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) u32 delay_num = 0; /* See the comment for rockchip_mmc_set_phase below */ - if (!rate) { - pr_err("%s: invalid clk rate\n", __func__); + if (!rate) return -EINVAL; - } raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 8bf7e805fd34913ad08622824309753a86850b3a..1271315b0c25b318b68def45dc567f0a9b9de45a 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = 
clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, else cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; - cpuclk->alt_parent = __clk_lookup(alt_parent); + cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent)); if (!cpuclk->alt_parent) { pr_err("%s: could not lookup alternate parent %s\n", __func__, alt_parent); diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h index d4b6b517fe1b44689df28853cf894baa3799153d..bd38c6aa389706c92261cd72f4f0c8fd8906ba67 100644 --- a/drivers/clk/samsung/clk-cpu.h +++ b/drivers/clk/samsung/clk-cpu.h @@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data { */ struct exynos_cpuclk { struct clk_hw hw; - struct clk *alt_parent; + struct clk_hw *alt_parent; void __iomem *ctrl_base; spinlock_t *lock; const struct exynos_cpuclk_cfg_data *cfg; diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index 77e1e2491689b9c37e943c43e8fabf5778a561aa..edb7197cc4b4dec3986432e50324f736eb345922 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return 4; WARN_ON((cfg & (BIT(3) - 1)) > 4); @@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent) { struct clk_dmn *clk = to_dmnclk(hw); u32 cfg = clkc_readl(clk->regofs); + const char *name = clk_hw_get_name(hw); /* parent of io domain can only be pll3 */ - if (strcmp(hw->init->name, "io") == 0) + if (strcmp(name, "io") == 0) return -EINVAL; cfg &= ~(BIT(3) - 1); @@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate, { unsigned long fin; unsigned ratio, wait, hold; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4; fin = *parent_rate; ratio = fin / rate; @@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_dmn *clk = to_dmnclk(hw); unsigned long fin; unsigned ratio, wait, hold, reg; - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4; + const char *name = clk_hw_get_name(hw); + unsigned bits = (strcmp(name, "mem") == 0) ? 
3 : 4; fin = parent_rate; ratio = fin / rate; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fd33dddab64b98d8bd606a42dc0c873511e28950..34355a9e896479d1325015d0654363aae951d4b0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -994,6 +994,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) struct freq_attr *fattr = to_attr(attr); ssize_t ret; + if (!fattr->show) + return -EIO; + down_read(&policy->rwsem); ret = fattr->show(policy, buf); up_read(&policy->rwsem); @@ -1008,6 +1011,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; + if (!fattr->store) + return -EIO; + get_online_cpus(); if (cpu_online(policy->cpu)) { @@ -1773,6 +1779,9 @@ void cpufreq_resume(void) if (!cpufreq_driver) return; + if (unlikely(!cpufreq_suspended)) + return; + cpufreq_suspended = false; if (!has_target() && !cpufreq_driver->resume) @@ -2689,14 +2698,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) } EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); -/* - * Stop cpufreq at shutdown to make sure it isn't holding any locks - * or mutexes when secondary CPUs are halted. - */ -static struct syscore_ops cpufreq_syscore_ops = { - .shutdown = cpufreq_suspend, -}; - struct kobject *cpufreq_global_kobject; EXPORT_SYMBOL(cpufreq_global_kobject); @@ -2708,8 +2709,6 @@ static int __init cpufreq_core_init(void) cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); BUG_ON(!cpufreq_global_kobject); - register_syscore_ops(&cpufreq_syscore_ops); - return 0; } core_initcall(cpufreq_core_init); diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index fdf288817df99698c8d968e5e66a638508ecc9f5..88d061d11cd20400ef01d2f699fc9fc20125abd5 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* Copyright (C) 2006-2007 Adam Belay * Copyright (C) 2009 Intel Corporation * @@ -1314,7 +1314,7 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle) if (!idx) { stop_critical_timings(); - wfi(); + cpu_do_idle(); start_critical_timings(); return 1; } diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index f8ac768ed5d737ae7027766977559d4c518df9dd..413e1f35773f7647667f5e408ab66e5d5826e124 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -75,7 +75,7 @@ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 10 * CAAM_CMD_SZ) /* Note: Nonce is counted in enckeylen */ #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) @@ -474,6 +474,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) u32 geniv, moveiv; u32 ctx1_iv_off = 0; u32 *desc; + u32 *wait_cmd; const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; @@ -736,6 +737,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* + * Wait for IV transfer (ofifo -> class2) to finish before starting + * ciphertext transfer (ofifo -> external memory). + */ + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_cmd); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index decaed448ebbbd14ac2cd43ea9e0c0d39d784451..b4bd34429cc1477857b19a2e4d31f816a626d3fa 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -28,9 +28,24 @@ #define DCP_MAX_CHANS 4 #define DCP_BUF_SZ PAGE_SIZE +#define DCP_SHA_PAY_SZ 64 #define DCP_ALIGNMENT 64 +/* + * Null hashes to align with hw behavior on imx6sl and ull + * these are flipped for consistency with hw output + */ +const uint8_t sha1_null_hash[] = + "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf" + "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda"; + +const uint8_t sha256_null_hash[] = + "\x55\xb8\x52\x78\x1b\x99\x95\xa4" + "\x4c\x93\x9b\x64\xe4\x41\xae\x27" + "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a" + "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3"; + /* DCP DMA descriptor. */ struct dcp_dma_desc { uint32_t next_cmd_addr; @@ -48,6 +63,7 @@ struct dcp_coherent_block { uint8_t aes_in_buf[DCP_BUF_SZ]; uint8_t aes_out_buf[DCP_BUF_SZ]; uint8_t sha_in_buf[DCP_BUF_SZ]; + uint8_t sha_out_buf[DCP_SHA_PAY_SZ]; uint8_t aes_key[2 * AES_KEYSIZE_128]; @@ -209,6 +225,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, DCP_BUF_SZ, DMA_FROM_DEVICE); + if (actx->fill % AES_BLOCK_SIZE) { + dev_err(sdcp->dev, "Invalid block size!\n"); + ret = -EINVAL; + goto aes_done_run; + } + /* Fill in the DMA descriptor. 
*/ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | MXS_DCP_CONTROL0_INTERRUPT | @@ -238,6 +260,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, ret = mxs_dcp_start_dma(actx); +aes_done_run: dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); @@ -264,13 +287,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) uint8_t *out_tmp, *src_buf, *dst_buf = NULL; uint32_t dst_off = 0; + uint32_t last_out_len = 0; uint8_t *key = sdcp->coh->aes_key; int ret = 0; int split = 0; - unsigned int i, len, clen, rem = 0; + unsigned int i, len, clen, rem = 0, tlen = 0; int init = 0; + bool limit_hit = false; actx->fill = 0; @@ -289,6 +314,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) for_each_sg(req->src, src, nents, i) { src_buf = sg_virt(src); len = sg_dma_len(src); + tlen += len; + limit_hit = tlen > req->nbytes; + + if (limit_hit) + len = req->nbytes - (tlen - len); do { if (actx->fill + len > out_off) @@ -305,13 +335,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * If we filled the buffer or this is the last SG, * submit the buffer. */ - if (actx->fill == out_off || sg_is_last(src)) { + if (actx->fill == out_off || sg_is_last(src) || + limit_hit) { ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; out_tmp = out_buf; + last_out_len = actx->fill; while (dst && actx->fill) { if (!split) { dst_buf = sg_virt(dst); @@ -334,6 +366,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) } } } while (len); + + if (limit_hit) + break; + } + + /* Copy the IV for CBC for chaining */ + if (!rctx->ecb) { + if (rctx->enc) + memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE), + AES_BLOCK_SIZE); + else + memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE), + AES_BLOCK_SIZE); } return ret; @@ -513,8 +558,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); - struct hash_alg_common *halg = crypto_hash_alg_common(tfm); - struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; dma_addr_t digest_phys = 0; @@ -536,10 +579,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req) desc->payload = 0; desc->status = 0; + /* + * Align driver with hw behavior when generating null hashes + */ + if (rctx->init && rctx->fini && desc->size == 0) { + struct hash_alg_common *halg = crypto_hash_alg_common(tfm); + const uint8_t *sha_buf = + (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ? + sha1_null_hash : sha256_null_hash; + memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize); + ret = 0; + goto done_run; + } + /* Set HASH_TERM bit for last transfer block. 
*/ if (rctx->fini) { - digest_phys = dma_map_single(sdcp->dev, req->result, - halg->digestsize, DMA_FROM_DEVICE); + digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, + DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; desc->payload = digest_phys; } @@ -547,9 +603,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req) ret = mxs_dcp_start_dma(actx); if (rctx->fini) - dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, + dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); +done_run: dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); return ret; @@ -567,6 +624,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) const int nents = sg_nents(req->src); uint8_t *in_buf = sdcp->coh->sha_in_buf; + uint8_t *out_buf = sdcp->coh->sha_out_buf; uint8_t *src_buf; @@ -621,11 +679,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) actx->fill = 0; - /* For some reason, the result is flipped. */ - for (i = 0; i < halg->digestsize / 2; i++) { - swap(req->result[i], - req->result[halg->digestsize - i - 1]); - } + /* For some reason the result is flipped */ + for (i = 0; i < halg->digestsize; i++) + req->result[i] = out_buf[halg->digestsize - i - 1]; } return 0; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 980e07475012043ece2c2447307a5818cb53b2c0..0d596a99f564559290dd7332fdbd234b24c4a5ec 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -95,7 +95,7 @@ struct service_hndl { static inline int get_current_node(void) { - return topology_physical_package_id(smp_processor_id()); + return topology_physical_package_id(raw_smp_processor_id()); } int adf_service_register(struct service_hndl *service); diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 500e4090e2fd417badf5f51af2e0cef3a8633d20..5a37c075ee5535c10aaf42ad279361dc9398a145 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -298,7 +298,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev) } static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src, - struct scatterlist **dst) + struct scatterlist **dst) { void *pages; int len; @@ -510,7 +510,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev, } static int s5p_set_outdata_start(struct s5p_aes_dev *dev, - struct ablkcipher_request *req) + struct ablkcipher_request *req) { struct scatterlist *sg; int err; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index ea8595d2c3d87902b1ed18cc8882c2a13c1b8061..8b383d3d21c21a0f2ab95a380d73e76863fc390d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -943,11 +943,13 @@ static void talitos_sg_unmap(struct device *dev, static void ipsec_esp_unmap(struct device *dev, struct talitos_edesc *edesc, - struct aead_request *areq) + struct aead_request *areq, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(aead); unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = areq->cryptlen - (encrypt ? 
0 : authsize); if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], @@ -956,7 +958,7 @@ static void ipsec_esp_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen, areq->assoclen); if (edesc->dma_len) @@ -967,7 +969,7 @@ static void ipsec_esp_unmap(struct device *dev, unsigned int dst_nents = edesc->dst_nents ? : 1; sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, - areq->assoclen + areq->cryptlen - ivsize); + areq->assoclen + cryptlen - ivsize); } } @@ -988,7 +990,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, areq); + ipsec_esp_unmap(dev, edesc, areq, true); /* copy the generated ICV to dst */ if (edesc->icv_ool) { @@ -1020,7 +1022,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, req); + ipsec_esp_unmap(dev, edesc, req, false); if (!err) { char icvdata[SHA512_DIGEST_SIZE]; @@ -1066,7 +1068,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, edesc = container_of(desc, struct talitos_edesc, desc); - ipsec_esp_unmap(dev, edesc, req); + ipsec_esp_unmap(dev, edesc, req, false); /* check ICV auth status */ if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != @@ -1173,6 +1175,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, * fill in and submit ipsec_esp descriptor */ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, + bool encrypt, void (*callback)(struct device *dev, struct talitos_desc *desc, void *context, int error)) @@ -1182,7 +1185,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, struct talitos_ctx *ctx = crypto_aead_ctx(aead); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; - unsigned int cryptlen = areq->cryptlen; + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); unsigned int ivsize = crypto_aead_ivsize(aead); int tbl_off = 0; int sg_count, ret; @@ -1324,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { - ipsec_esp_unmap(dev, edesc, areq); + ipsec_esp_unmap(dev, edesc, areq, encrypt); kfree(edesc); } return ret; @@ -1433,9 +1436,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, unsigned int authsize = crypto_aead_authsize(authenc); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); unsigned int ivsize = crypto_aead_ivsize(authenc); + unsigned int cryptlen = areq->cryptlen - (encrypt ? 
0 : authsize); return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, - iv, areq->assoclen, areq->cryptlen, + iv, areq->assoclen, cryptlen, authsize, ivsize, icv_stashing, areq->base.flags, encrypt); } @@ -1454,7 +1458,7 @@ static int aead_encrypt(struct aead_request *req) /* set encrypt */ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; - return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); + return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done); } static int aead_decrypt(struct aead_request *req) @@ -1466,14 +1470,13 @@ static int aead_decrypt(struct aead_request *req) struct talitos_edesc *edesc; void *icvdata; - req->cryptlen -= authsize; - /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, req->iv, 1, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); - if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && + (priv->features & TALITOS_FTR_HW_AUTH_CHECK) && ((!edesc->src_nents && !edesc->dst_nents) || priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { @@ -1485,7 +1488,8 @@ static int aead_decrypt(struct aead_request *req) /* reset integrity check result bits */ edesc->desc.hdr_lo = 0; - return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); + return ipsec_esp(edesc, req, false, + ipsec_esp_decrypt_hwauth_done); } /* Have to check the ICV with software */ @@ -1501,7 +1505,7 @@ static int aead_decrypt(struct aead_request *req) sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize, req->assoclen + req->cryptlen - authsize); - return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); + return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done); } static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, @@ -1528,6 +1532,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } +static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 || + keylen == AES_KEYSIZE_256) + return ablkcipher_setkey(cipher, key, keylen); + + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + + return -EINVAL; +} + static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) @@ -1656,6 +1672,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); + + if (!areq->nbytes) + return 0; + + if (areq->nbytes % blocksize) + return -EINVAL; /* allocate extended descriptor */ edesc = ablkcipher_edesc_alloc(areq, true); @@ -1673,6 +1697,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); struct talitos_edesc *edesc; + unsigned int blocksize = + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); + + if (!areq->nbytes) + return 0; + + if (areq->nbytes % blocksize) + return -EINVAL; /* allocate extended descriptor */ edesc = ablkcipher_edesc_alloc(areq, false); @@ -2621,6 +2653,7 @@ static struct talitos_alg_template driver_algs[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, } }, .desc_hdr_template = 
DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2631,13 +2664,13 @@ static struct talitos_alg_template driver_algs[] = { .alg.crypto = { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-talitos", - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, } }, .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | @@ -3010,6 +3043,7 @@ static int talitos_remove(struct platform_device *ofdev) break; case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&t_alg->algt.alg.aead); + break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&t_alg->algt.alg.hash); break; diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index 1b21bb60e7975ac91ac9735cd0b239148d4df292..2c8f41fbe94fb71c00e3471b6b8719b6d49547fc 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -198,11 +198,10 @@ static void exynos_bus_exit(struct device *dev) if (ret < 0) dev_warn(dev, "failed to disable the devfreq-event devices\n"); - if (bus->regulator) - regulator_disable(bus->regulator); - dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); + if (bus->regulator) + regulator_disable(bus->regulator); } /* @@ -391,6 +390,7 @@ static int exynos_bus_probe(struct platform_device *pdev) struct exynos_bus *bus; int ret, max_state; unsigned long min_freq, max_freq; + bool passive = false; if (!np) { dev_err(dev, "failed to find devicetree node\n"); @@ -404,27 +404,27 @@ static int exynos_bus_probe(struct platform_device *pdev) bus->dev = &pdev->dev; platform_set_drvdata(pdev, bus); - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - return ret; - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) { - ret = -ENOMEM; - goto err; - } + if (!profile) + return -ENOMEM; node = of_parse_phandle(dev->of_node, "devfreq", 0); if (node) { of_node_put(node); - goto passive; + passive = true; } else { ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; } + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); if (ret < 0) - goto err; + goto err_reg; + + if (passive) + goto passive; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -514,6 +514,9 @@ static int exynos_bus_probe(struct platform_device *pdev) err: dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); +err_reg: + if (!passive) + regulator_disable(bus->regulator); return ret; } diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 5be96b2249e72a1e8cf1280f1c5a10bdfd54a6d3..62c262fc2178d4cbf26619a047ccf52652fba2ea 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb, static int devfreq_passive_event_handler(struct devfreq *devfreq, unsigned int event, void *data) { - struct device *dev = devfreq->dev.parent; struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent = (struct devfreq *)p_data->parent; @@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, p_data->this = devfreq; nb->notifier_call = devfreq_passive_notifier_call; - ret = devm_devfreq_register_notifier(dev, parent, 
nb, + ret = devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER); break; case DEVFREQ_GOV_STOP: - devm_devfreq_unregister_notifier(dev, parent, nb, - DEVFREQ_TRANSITION_NOTIFIER); + WARN_ON(devfreq_unregister_notifier(parent, nb, + DEVFREQ_TRANSITION_NOTIFIER)); break; default: break; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 4e2379b6c71b8d508732f125e0cf9c7b2fba32af..de06728a81b2daae6a31e81954fd9627d499ba5e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -120,7 +120,7 @@ config DMA_JZ4740 config DMA_JZ4780 tristate "JZ4780 DMA support" - depends on MACH_JZ4780 || COMPILE_TEST + depends on MIPS || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6ba53bbd0e1616d5771dcf44cf068b71a7bcfacd..b984d00bc05587240961198f38efb26e86d72d65 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -891,8 +891,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (rc) + if (rc) { + dev_err(&pdev->dev, "Unable to set DMA mask\n"); return rc; + } od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 2ceb5a26f860fab1f3450819bbf08f89f5626f47..709e1308777ae1f38bc7af9a167e3b22fcbabe73 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct cppi41_channel *c = to_cpp41_chan(chan); + struct dma_async_tx_descriptor *txd = NULL; + struct cppi41_dd *cdd = c->cdd; struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; + int error; + + error = pm_runtime_get(cdd->ddev.dev); + if (error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + + return NULL; + } + + if (cdd->is_suspended) + goto err_out_not_ready; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { @@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( d++; } - return &c->txd; + txd = &c->txd; + +err_out_not_ready: + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); + + return txd; } static void cppi41_compute_td_desc(struct cppi41_desc *d) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 803cfb4523b08b4aef2fe4b77d5f3090e93b63b3..aca2d6fd92d56a49d943d95225ca5871f407354d 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -580,7 +580,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, to_jz4780_dma_desc(vdesc), 0); } else if (cookie == jzchan->desc->vdesc.tx.cookie) { txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc, - (jzchan->curr_hwdesc + 1) % jzchan->desc->count); + jzchan->curr_hwdesc + 1); } else txstate->residue = 0; diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 57962bff75324c75ff96f38cd3f9ab9b0703e0ec..72f31e837b1d5a759e1686d3cf1ba8b54690a93a 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -2268,9 +2268,6 @@ static int edma_probe(struct platform_device *pdev) ecc->default_queue = info->default_queue; - for (i = 0; i < ecc->num_slots; i++) - edma_write_slot(ecc, i, &dummy_paramset); - if (info->rsv) { /* Set the reserved slots in inuse list */ rsv_slots = info->rsv->rsv_slots; @@ -2283,6 +2280,12 @@ static int edma_probe(struct platform_device *pdev) } } + for (i = 0; i 
< ecc->num_slots; i++) { + /* Reset only unused - not reserved - paRAM slots */ + if (!test_bit(i, ecc->slot_inuse)) + edma_write_slot(ecc, i, &dummy_paramset); + } + /* Clear the xbar mapped channels in unused list */ xbar_chans = info->xbar_chans; if (xbar_chans) { diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index d139706f01fe81a6e32c0a2bc92a1e05502d3932..9d936584d331008d253363622a425b62765a382a 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -129,7 +129,7 @@ static void ioat_init_channel(struct ioatdma_device *ioat_dma, struct ioatdma_chan *ioat_chan, int idx); static void ioat_intr_quirk(struct ioatdma_device *ioat_dma); -static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); +static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma); static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); static int ioat_dca_enabled = 1; @@ -573,7 +573,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) * ioat_enumerate_channels - find and initialize the device's channels * @ioat_dma: the ioat dma device to be enumerated */ -static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) +static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) { struct ioatdma_chan *ioat_chan; struct device *dev = &ioat_dma->pdev->dev; @@ -592,7 +592,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); xfercap_log &= 0x1f; /* bits [4:0] valid */ if (xfercap_log == 0) - return 0; + return; dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { @@ -609,7 +609,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) } } dma->chancnt = i; - return i; } /** diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index a410657f7bcd6f61c0335281e1d0997e540f6dbc..012584cf3c17bf727f843afe70505c59255b0a9d 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) list_for_each_entry_safe(iter, _iter, &iop_chan->chain, chain_node) { pr_debug("\tcookie: %d slot: %d busy: %d " - "this_desc: %#x next_desc: %#x ack: %d\n", + "this_desc: %#x next_desc: %#llx ack: %d\n", iter->async_tx.cookie, iter->idx, busy, - iter->async_tx.phys, iop_desc_get_next_desc(iter), + iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter), async_tx_test_ack(&iter->async_tx)); prefetch(_iter); prefetch(&_iter->async_tx); @@ -315,9 +315,9 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, int i; dev_dbg(iop_chan->device->common.dev, "allocated slot: %d " - "(desc %p phys: %#x) slots_per_op %d\n", + "(desc %p phys: %#llx) slots_per_op %d\n", iter->idx, iter->hw_desc, - iter->async_tx.phys, slots_per_op); + (u64)iter->async_tx.phys, slots_per_op); /* pre-ack all but the last descriptor */ if (num_slots != slots_per_op) @@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, return NULL; BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n", __func__, len); spin_lock_bh(&iop_chan->lock); @@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: %lx\n", __func__, src_cnt, len, flags); 
spin_lock_bh(&iop_chan->lock); @@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, if (unlikely(!len)) return NULL; - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); @@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: %lx\n", __func__, src_cnt, len, flags); if (dmaf_p_disabled_continue(flags)) @@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, return NULL; BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 6b16ce390dce182409c2bdbfc287bd2172a2a953..9f901f16bddcdc07a200b9a4dfc6cf5a2bfff1de 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1429,8 +1429,10 @@ static int omap_dma_probe(struct platform_device *pdev) rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, IRQF_SHARED, "omap-dma-engine", od); - if (rc) + if (rc) { + omap_dma_free(od); return rc; + } } if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 6497f5283e3b08f76ba59f44d7be355b02fea1f5..81acbde133946fbb278a92360529365ff3696916 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -686,7 +686,21 @@ static int bam_dma_terminate_all(struct dma_chan *chan) /* remove all transactions, including active transaction */ spin_lock_irqsave(&bchan->vc.lock, flag); + /* + * If we have transactions queued, then some might be committed to the + * hardware in the desc fifo. The only way to reset the desc fifo is + * to do a hardware reset (either by pipe or the entire block). + * bam_chan_init_hw() will trigger a pipe reset, and also reinit the + * pipe. If the pipe is left disabled (default state after pipe reset) + * and is accessed by a connected hardware engine, a fatal error in + * the BAM will occur. There is a small window where this could happen + * with bam_chan_init_hw(), but it is assumed that the caller has + * stopped activity on any attached hardware engine. Make sure to do + * this first so that the BAM hardware doesn't cause memory corruption + * by accessing freed resources. 
+ */ if (bchan->curr_txd) { + bam_chan_init_hw(bchan, bchan->curr_txd->dir); list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); bchan->curr_txd = NULL; } diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 8c3c588834d2e0960f44f96a269c9daf4ad3360b..a7e1f6e17e3d177f85d350ba5a78befe5d220980 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); - if (ret) + if (ret) { + kfree(rsv_events); return ret; + } for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 896bafb7a53240e3bac1679a79177840fc81bcd1..cf6588cc3efdc912a33ada465df21d2e1a96464f 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, } dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, - td_desc->desc_list_len, DMA_MEM_TO_DEV); + td_desc->desc_list_len, DMA_TO_DEVICE); return &td_desc->txd; } diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8288fe4d17c38ec7b3094080bb409392ebaa5651..cd271f78260516483aa792d4a7a621c3afbdc09f 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -72,6 +72,9 @@ #define XILINX_DMA_DMACR_CIRC_EN BIT(1) #define XILINX_DMA_DMACR_RUNSTOP BIT(0) #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) +#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16) +#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8) #define XILINX_DMA_REG_DMASR 0x0004 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) @@ -2054,8 +2057,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, chan->config.gen_lock = cfg->gen_lock; chan->config.master = cfg->master; + dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; if (cfg->gen_lock && chan->genlock) { dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; + dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; } @@ -2069,11 +2074,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, chan->config.delay = cfg->delay; if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { + dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; chan->config.coalesc = cfg->coalesc; } if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { + dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; chan->config.delay = cfg->delay; } diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index b0bd0f64d8f2166348c0ed49701b48f138766c02..6037efa94c9bab5e25f493edcaa7c28e1e057423 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1651,6 +1651,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); int irq = irq_desc_get_irq(desc); + unsigned long bits; dberr = (irq == edac->db_irq) ? 1 : 0; sm_offset = dberr ? 
A10_SYSMGR_ECC_INTSTAT_DERR_OFST : @@ -1660,7 +1661,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status); - for_each_set_bit(bit, (unsigned long *)&irq_status, 32) { + bits = irq_status; + for_each_set_bit(bit, &bits, 32) { irq = irq_linear_revmap(edac->domain, dberr * 32 + bit); if (irq) generic_handle_irq(irq); diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index d4253742543841d6d261dfea22156259b0a13183..c0e54396f250226127042574f71cbb78a02c3482 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -375,7 +375,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, pcie->device_id.vendor_id, pcie->device_id.device_id); p = pcie->device_id.class_code; - printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); + printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]); } if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, @@ -384,6 +384,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk( "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); + + /* Fatal errors call __ghes_panic() before AER handler prints this */ + if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && + (gdata->error_severity & CPER_SEV_FATAL)) { + struct aer_capability_regs *aer; + + aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", + pfx, aer->uncor_status, aer->uncor_mask); + printk("%saer_uncor_severity: 0x%08x\n", + pfx, aer->uncor_severity); + printk("%sTLP Header: %08x %08x %08x %08x\n", pfx, + aer->header_log.dw0, aer->header_log.dw1, + aer->header_log.dw2, aer->header_log.dw3); + } } static void cper_estatus_print_section( diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2f47c5b5f4cb8fb837f4c154a976112e06ea37d2..d89457d62a2423d292976f9610a3e99ade90dd3f 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -243,6 +243,9 @@ static __init int efivar_ssdt_load(void) void *data; int ret; + if (!efivar_ssdt[0]) + return 0; + ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); list_for_each_entry_safe(entry, aux, &entries, list) { diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c463871609764d276f80be43e6bf587da5ea8862..98cdfc2ee0dff28756050eaec150840023ef21c1 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, if (count < sizeof(u32)) return -EINVAL; param.type = *(u32 *)buf; - count -= sizeof(u32); buf += sizeof(u32); /* The remaining buffer is the data payload */ - if (count > gsmi_dev.data_buf->length) + if ((count - sizeof(u32)) > gsmi_dev.data_buf->length) return -EINVAL; param.data_len = count - sizeof(u32); @@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj, spin_unlock_irqrestore(&gsmi_dev.lock, flags); - return rc; + return (rc == 0) ? 
count : rc; } diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c index b46b436cb97feaf3966fb240f8c727f2dc3b8cd1..52cac7c600eea2415d9d8ad8a6eda91e1e35e2ac 100644 --- a/drivers/gpio/gpio-max77620.c +++ b/drivers/gpio/gpio-max77620.c @@ -167,13 +167,13 @@ static int max77620_gpio_set_debounce(struct gpio_chip *gc, case 0: val = MAX77620_CNFG_GPIO_DBNC_None; break; - case 1 ... 8: + case 1 ... 8000: val = MAX77620_CNFG_GPIO_DBNC_8ms; break; - case 9 ... 16: + case 8001 ... 16000: val = MAX77620_CNFG_GPIO_DBNC_16ms; break; - case 17 ... 32: + case 16001 ... 32000: val = MAX77620_CNFG_GPIO_DBNC_32ms; break; default: diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 537cec7583fcac038865c38c8731182287fcb4b1..cf88a0bfe99eafd443bcf1294f873b1aa1d999f1 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val) BIT(offs % SYSCON_REG_BITS)); } - priv->data->set(chip, offset, val); + chip->set(chip, offset, val); return 0; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3b0d77b2fdc559249f37bd00ac37a47eb48ce50d..6008a30a17d0b2fcfd4038b37fd4fdac8e481d72 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -426,12 +426,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) struct linehandle_state *lh; struct file *file; int fd, i, count = 0, ret; + u32 lflags; if (copy_from_user(&handlereq, ip, sizeof(handlereq))) return -EFAULT; if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX)) return -EINVAL; + lflags = handlereq.flags; + + /* + * Do not allow both INPUT & OUTPUT flags to be set as they are + * contradictory. + */ + if ((lflags & GPIOHANDLE_REQUEST_INPUT) && + (lflags & GPIOHANDLE_REQUEST_OUTPUT)) + return -EINVAL; + lh = kzalloc(sizeof(*lh), GFP_KERNEL); if (!lh) return -ENOMEM; @@ -452,7 +463,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) /* Request each GPIO */ for (i = 0; i < handlereq.lines; i++) { u32 offset = handlereq.lineoffsets[i]; - u32 lflags = handlereq.flags; struct gpio_desc *desc; if (offset >= gdev->ngpio) { @@ -787,7 +797,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) } /* This is just wrong: we don't look for events on output lines */ - if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) || + (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) || + (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) { ret = -EINVAL; goto out_free_label; } @@ -801,10 +813,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); ret = gpiod_direction_input(desc); if (ret) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2f936a76e44092875e8d4e1bd79145aad2f93161..78591eab134abb254614643c1cf268c88578642f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -83,6 +83,16 @@ config DRM_LOAD_EDID_FIRMWARE default case is N. Details and instructions how to build your own EDID data are given in Documentation/EDID/HOWTO.txt. 
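Relating to the gpiolib changes earlier in this hunk set, which reject contradictory line flags in linehandle_create() and input-only violations in lineevent_create(): a minimal standalone sketch of that validation, with the flag values assumed from the v1 GPIO character-device uAPI:

/*
 * Sketch of the flag sanity checks introduced above, written as a
 * standalone helper. Flag values are assumed to match the v1 GPIO uAPI.
 */
#include <errno.h>
#include <stdint.h>

#define GPIOHANDLE_REQUEST_INPUT	(1UL << 0)
#define GPIOHANDLE_REQUEST_OUTPUT	(1UL << 1)
#define GPIOHANDLE_REQUEST_OPEN_DRAIN	(1UL << 3)
#define GPIOHANDLE_REQUEST_OPEN_SOURCE	(1UL << 4)

/* Returns 0 if the request flags are coherent, -EINVAL otherwise. */
static int validate_line_flags(uint32_t lflags, int is_event_request)
{
	/* INPUT and OUTPUT are contradictory on the same request */
	if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
	    (lflags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/* Event requests are input-only: no output or drive modes */
	if (is_event_request &&
	    (lflags & (GPIOHANDLE_REQUEST_OUTPUT |
		       GPIOHANDLE_REQUEST_OPEN_DRAIN |
		       GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	return 0;
}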
+config DRM_DP_CEC + bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" + select CEC_CORE + help + Choose this option if you want to enable HDMI CEC support for + DisplayPort/USB-C to HDMI adapters. + + Note: not all adapters support this feature, and even for those + that do support this they often do not hook up the CEC pin. + config DRM_TTM tristate depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 25c720454017e69a004fccfd7efe8147cdf60b46..4a7766c18b82cb8be1ee84bdb229dc47237e8c9e 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -33,6 +33,7 @@ drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o +drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3938fca1ea8e5f4c69fd5e0746fcc7ec60c6d2f1..24941a7b659f48df5dcb469a2540d2cc817bf1e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -430,6 +430,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + if (info->read_mmr_reg.count > 128) + return -EINVAL; + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); if (!regs) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 327bdf13e8bc864231f697a91c1cf891eb1f44c9..b0beb5e537bcb040358826d53bbf56abb67d4185 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1606,7 +1606,7 @@ static void si_program_aspm(struct amdgpu_device *adev) if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) { + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) { orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); data &= ~PLL_RAMP_UP_TIME_0_MASK; if (orig != data) @@ -1655,14 +1655,14 @@ static void si_program_aspm(struct amdgpu_device *adev) orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data); orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL); data &= ~LS2_EXIT_TIME_MASK; - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN)) data |= LS2_EXIT_TIME(5); if (orig != data) si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 3907439417e76c024910e69609ec7cf2f8aff96d..c0db3b57dfe582d08ab08c565f1d8c7842573da8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3739,6 +3739,11 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr) data->frame_time_x2 = frame_time_in_us * 2 / 100; + if (data->frame_time_x2 < 280) { + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); + data->frame_time_x2 = 280; + } + 
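The smu7 clamp just added works in units of 50 us, since frame_time_x2 = frame_time_in_us * 2 / 100, so the floor of 280 corresponds to a 14 ms minimum VBI timeout. A quick worked example (values illustrative) of why high refresh rates trip the clamp:

/* Illustrative arithmetic for the VBI timeout floor added above. */
#include <stdio.h>

int main(void)
{
	unsigned int refresh_hz = 120;				/* example display */
	unsigned int frame_time_in_us = 1000000 / refresh_hz;	/* ~8333 us */
	unsigned int frame_time_x2 = frame_time_in_us * 2 / 100; /* ~166, 50 us units */

	if (frame_time_x2 < 280)	/* 280 * 50 us = 14 ms minimum */
		frame_time_x2 = 280;

	printf("frame_time_x2 = %u (%u us)\n", frame_time_x2, frame_time_x2 * 50);
	return 0;
}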
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 80993a8734e084f9fa1670816a21bd34d352278d..8b6f8fac92e89d5355d737f7ec8b80a71cc99a93 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -300,7 +300,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct tc_data *tc = aux_to_tc(aux); - size_t size = min_t(size_t, 8, msg->size); + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size); u8 request = msg->request & ~DP_AUX_I2C_MOT; u8 *buf = msg->buffer; u32 tmp = 0; diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c new file mode 100644 index 0000000000000000000000000000000000000000..ddb1c5adebb96a720de6fedccf707c1cbf5ce860 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_cec.c @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DisplayPort CEC-Tunneling-over-AUX support + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include +#include +#include +#include + +/* + * Unfortunately it turns out that we have a chicken-and-egg situation + * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters + * have a converter chip that supports CEC-Tunneling-over-AUX (usually the + * Parade PS176), but they do not wire up the CEC pin, thus making CEC + * useless. + * + * Sadly there is no way for this driver to know this. What happens is + * that a /dev/cecX device is created that is isolated and unable to see + * any of the other CEC devices. Quite literally the CEC wire is cut + * (or in this case, never connected in the first place). + * + * The reason so few adapters support this is that this tunneling protocol + * was never supported by any OS. So there was no easy way of testing it, + * and no incentive to correctly wire up the CEC pin. + * + * Hopefully by creating this driver it will be easier for vendors to + * finally fix their adapters and test the CEC functionality. + * + * I keep a list of known working adapters here: + * + * https://hverkuil.home.xs4all.nl/cec-status.txt + * + * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works + * and is not yet listed there. + * + * Note that the current implementation does not support CEC over an MST hub. + * As far as I can see there is no mechanism defined in the DisplayPort + * standard to transport CEC interrupts over an MST device. It might be + * possible to do this through polling, but I have not been able to get that + * to work. + */ + +/** + * DOC: dp cec helpers + * + * These functions take care of supporting the CEC-Tunneling-over-AUX + * feature of DisplayPort-to-HDMI adapters. + */ + +/* + * When the EDID is unset because the HPD went low, then the CEC DPCD registers + * typically can no longer be read (true for a DP-to-HDMI adapter since it is + * powered by the HPD). However, some displays toggle the HPD off and on for a + * short period for one reason or another, and that would cause the CEC adapter + * to be removed and added again, even though nothing else changed. + * + * This module parameter sets a delay in seconds before the CEC adapter is + * actually unregistered. Only if the HPD does not return within that time will + * the CEC adapter be unregistered. 
+ * + * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never + * be unregistered for as long as the connector remains registered. + * + * If it is set to 0, then the CEC adapter will be unregistered immediately as + * soon as the HPD disappears. + * + * The default is one second to prevent short HPD glitches from unregistering + * the CEC adapter. + * + * Note that for integrated HDMI branch devices that support CEC the DPCD + * registers remain available even if the HPD goes low since it is not powered + * by the HPD. In that case the CEC adapter will never be unregistered during + * the life time of the connector. At least, this is the theory since I do not + * have hardware with an integrated HDMI branch device that supports CEC. + */ +#define NEVER_UNREG_DELAY 1000 +static unsigned int drm_dp_cec_unregister_delay = 1; +module_param(drm_dp_cec_unregister_delay, uint, 0600); +MODULE_PARM_DESC(drm_dp_cec_unregister_delay, + "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister"); + +static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0; + ssize_t err = 0; + + err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + return (enable && err < 0) ? err : 0; +} + +static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + /* Bit 15 (logical address 15) should always be set */ + u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST; + u8 mask[2]; + ssize_t err; + + if (addr != CEC_LOG_ADDR_INVALID) + la_mask |= adap->log_addrs.log_addr_mask | (1 << addr); + mask[0] = la_mask & 0xff; + mask[1] = la_mask >> 8; + err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); + return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0; +} + +static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + unsigned int retries = min(5, attempts - 1); + ssize_t err; + + err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER, + msg->msg, msg->len); + if (err < 0) + return err; + + err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO, + (msg->len - 1) | (retries << 4) | + DP_CEC_TX_MESSAGE_SEND); + return err < 0 ? err : 0; +} + +static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, + bool enable) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + ssize_t err; + u8 val; + + if (!(adap->capabilities & CEC_CAP_MONITOR_ALL)) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val); + if (err >= 0) { + if (enable) + val |= DP_CEC_SNOOPING_ENABLE; + else + val &= ~DP_CEC_SNOOPING_ENABLE; + err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + } + return (enable && err < 0) ? err : 0; +} + +static void drm_dp_cec_adap_status(struct cec_adapter *adap, + struct seq_file *file) +{ + struct drm_dp_aux *aux = cec_get_drvdata(adap); + struct drm_dp_desc desc; + struct drm_dp_dpcd_ident *id = &desc.ident; + + if (drm_dp_read_desc(aux, &desc, true)) + return; + seq_printf(file, "OUI: %*pdH\n", + (int)sizeof(id->oui), id->oui); + seq_printf(file, "ID: %*pE\n", + (int)strnlen(id->device_id, sizeof(id->device_id)), + id->device_id); + seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf); + /* + * Show this both in decimal and hex: at least one vendor + * always reports this in hex. 
+ */ + seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n", + id->sw_major_rev, id->sw_minor_rev, + id->sw_major_rev, id->sw_minor_rev); +} + +static const struct cec_adap_ops drm_dp_cec_adap_ops = { + .adap_enable = drm_dp_cec_adap_enable, + .adap_log_addr = drm_dp_cec_adap_log_addr, + .adap_transmit = drm_dp_cec_adap_transmit, + .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable, + .adap_status = drm_dp_cec_adap_status, +}; + +static int drm_dp_cec_received(struct drm_dp_aux *aux) +{ + struct cec_adapter *adap = aux->cec.adap; + struct cec_msg msg; + u8 rx_msg_info; + ssize_t err; + + err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); + if (err < 0) + return err; + + if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED)) + return 0; + + msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1; + err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); + if (err < 0) + return err; + + cec_received_msg(adap, &msg); + return 0; +} + +static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) +{ + struct cec_adapter *adap = aux->cec.adap; + u8 flags; + + if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) + return; + + if (flags & DP_CEC_RX_MESSAGE_INFO_VALID) + drm_dp_cec_received(aux); + + if (flags & DP_CEC_TX_MESSAGE_SENT) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK); + else if (flags & DP_CEC_TX_LINE_ERROR) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR | + CEC_TX_STATUS_MAX_RETRIES); + else if (flags & + (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR)) + cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK | + CEC_TX_STATUS_MAX_RETRIES); + drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); +} + +/** + * drm_dp_cec_irq() - handle CEC interrupt, if any + * @aux: DisplayPort AUX channel + * + * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX + * is present, then it will check for a CEC_IRQ and handle it accordingly. + */ +void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ + u8 cec_irq; + int ret; + + mutex_lock(&aux->cec.lock); + if (!aux->cec.adap) + goto unlock; + + ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, + &cec_irq); + if (ret < 0 || !(cec_irq & DP_CEC_IRQ)) + goto unlock; + + drm_dp_cec_handle_irq(aux); + drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_irq); + +static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap) +{ + u8 cap = 0; + + if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 || + !(cap & DP_CEC_TUNNELING_CAPABLE)) + return false; + if (cec_cap) + *cec_cap = cap; + return true; +} + +/* + * Called if the HPD was low for more than drm_dp_cec_unregister_delay + * seconds. This unregisters the CEC adapter. + */ +static void drm_dp_cec_unregister_work(struct work_struct *work) +{ + struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux, + cec.unregister_work.work); + + mutex_lock(&aux->cec.lock); + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; + mutex_unlock(&aux->cec.lock); +} + +/* + * A new EDID is set. If there is no CEC adapter, then create one. If + * there was a CEC adapter, then check if the CEC adapter properties + * were unchanged and just update the CEC physical address. Otherwise + * unregister the old CEC adapter and create a new one. 
+ */ +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) +{ + u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD; + unsigned int num_las = 1; + u8 cap; + +#ifndef CONFIG_MEDIA_CEC_RC + /* + * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by + * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined. + * + * Do this here as well to ensure the tests against cec_caps are + * correct. + */ + cec_caps &= ~CEC_CAP_RC; +#endif + cancel_delayed_work_sync(&aux->cec.unregister_work); + + mutex_lock(&aux->cec.lock); + if (!drm_dp_cec_cap(aux, &cap)) { + /* CEC is not supported, unregister any existing adapter */ + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; + goto unlock; + } + + if (cap & DP_CEC_SNOOPING_CAPABLE) + cec_caps |= CEC_CAP_MONITOR_ALL; + if (cap & DP_CEC_MULTIPLE_LA_CAPABLE) + num_las = CEC_MAX_LOG_ADDRS; + + if (aux->cec.adap) { + if (aux->cec.adap->capabilities == cec_caps && + aux->cec.adap->available_log_addrs == num_las) { + /* Unchanged, so just set the phys addr */ + cec_s_phys_addr_from_edid(aux->cec.adap, edid); + goto unlock; + } + /* + * The capabilities changed, so unregister the old + * adapter first. + */ + cec_unregister_adapter(aux->cec.adap); + } + + /* Create a new adapter */ + aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops, + aux, aux->cec.name, cec_caps, + num_las); + if (IS_ERR(aux->cec.adap)) { + aux->cec.adap = NULL; + goto unlock; + } + if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) { + cec_delete_adapter(aux->cec.adap); + aux->cec.adap = NULL; + } else { + /* + * Update the phys addr for the new CEC adapter. When called + * from drm_dp_cec_register_connector() edid == NULL, so in + * that case the phys addr is just invalidated. + */ + cec_s_phys_addr_from_edid(aux->cec.adap, edid); + } +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_set_edid); + +/* + * The EDID disappeared (likely because of the HPD going down). + */ +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ + cancel_delayed_work_sync(&aux->cec.unregister_work); + + mutex_lock(&aux->cec.lock); + if (!aux->cec.adap) + goto unlock; + + cec_phys_addr_invalidate(aux->cec.adap); + /* + * We're done if we want to keep the CEC device + * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the + * DPCD still indicates the CEC capability (expected for an integrated + * HDMI branch device). + */ + if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY && + !drm_dp_cec_cap(aux, NULL)) { + /* + * Unregister the CEC adapter after drm_dp_cec_unregister_delay + * seconds. This to debounce short HPD off-and-on cycles from + * displays. + */ + schedule_delayed_work(&aux->cec.unregister_work, + drm_dp_cec_unregister_delay * HZ); + } +unlock: + mutex_unlock(&aux->cec.lock); +} +EXPORT_SYMBOL(drm_dp_cec_unset_edid); + +/** + * drm_dp_cec_register_connector() - register a new connector + * @aux: DisplayPort AUX channel + * @name: name of the CEC device + * @parent: parent device + * + * A new connector was registered with associated CEC adapter name and + * CEC adapter parent device. After registering the name and parent + * drm_dp_cec_set_edid() is called to check if the connector supports + * CEC and to register a CEC adapter if that is the case. 
+ */ +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, + struct device *parent) +{ + WARN_ON(aux->cec.adap); + aux->cec.name = name; + aux->cec.parent = parent; + INIT_DELAYED_WORK(&aux->cec.unregister_work, + drm_dp_cec_unregister_work); + + drm_dp_cec_set_edid(aux, NULL); +} +EXPORT_SYMBOL(drm_dp_cec_register_connector); + +/** + * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any + * @aux: DisplayPort AUX channel + */ +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ + if (!aux->cec.adap) + return; + cancel_delayed_work_sync(&aux->cec.unregister_work); + cec_unregister_adapter(aux->cec.adap); + aux->cec.adap = NULL; +} +EXPORT_SYMBOL(drm_dp_cec_unregister_connector); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 876a2d99666bf222ad8cde951e263bb311ffd1bd..dde0623b77b87189b1483f81afbf8fa77923bb2b 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -990,6 +990,7 @@ static const struct i2c_lock_operations drm_dp_i2c_lock_ops = { void drm_dp_aux_init(struct drm_dp_aux *aux) { mutex_init(&aux->hw_mutex); + mutex_init(&aux->cec.lock); aux->ddc.algo = &drm_dp_i2c_algo; aux->ddc.algo_data = aux; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 12a5c2ea5c0962b3ac142bfca16573931b36fbdd..5ba527a14b7596729cfa2544f290647a1ec154c6 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -168,6 +168,9 @@ static const struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + /* Lenovo G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 48e99ab525c363dbbf78f55b540d49d17bbd3769..ae5c0952a7a3b6b84f5283d0514df4bbd5bde08d 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -996,6 +996,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return -EACCES; } + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } + + vma->vm_flags &= ~VM_MAYWRITE; + } + ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d7822bef1986983a53925ad563f9303c6820f1b8..b33d39d9dd14d897c9a83020944f75e8cb81e557 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -387,6 +387,9 @@ static void output_poll_execute(struct work_struct *work) enum drm_connector_status old_status; bool repoll = false, changed; + if (!dev->mode_config.poll_enabled) + return; + /* Pick up any changes detected by the probe functions. 
*/ changed = dev->mode_config.delayed_event; dev->mode_config.delayed_event = false; @@ -550,7 +553,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init); */ void drm_kms_helper_poll_fini(struct drm_device *dev) { - drm_kms_helper_poll_disable(dev); + if (!dev->mode_config.poll_enabled) + return; + + dev->mode_config.poll_enabled = false; + cancel_delayed_work_sync(&dev->mode_config.output_poll_work); } EXPORT_SYMBOL(drm_kms_helper_poll_fini); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 70980f82a15b3f76ef419fca1a1cb78875ae74f9..1e104518192da4cf7b5ff0eb2832095880d52a98 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -26,6 +26,7 @@ */ #include "i915_drv.h" +#include "intel_ringbuffer.h" /** * DOC: batch buffer command parser @@ -50,13 +51,11 @@ * granting userspace undue privileges. There are three categories of privilege. * * First, commands which are explicitly defined as privileged or which should - * only be used by the kernel driver. The parser generally rejects such - * commands, though it may allow some from the drm master process. + * only be used by the kernel driver. The parser rejects such commands * * Second, commands which access registers. To support correct/enhanced * userspace functionality, particularly certain OpenGL extensions, the parser - * provides a whitelist of registers which userspace may safely access (for both - * normal and drm master processes). + * provides a whitelist of registers which userspace may safely access * * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * The parser always rejects such commands. @@ -81,11 +80,104 @@ * in the per-engine command tables. * * Other command table entries map fairly directly to high level categories - * mentioned above: rejected, master-only, register whitelist. The parser - * implements a number of checks, including the privileged memory checks, via a - * general bitmasking mechanism. + * mentioned above: rejected, register whitelist. The parser implements a number + * of checks, including the privileged memory checks, via a general bitmasking + * mechanism. */ +/* + * A command that requires special handling by the command parser. + */ +struct drm_i915_cmd_descriptor { + /* + * Flags describing how the command parser processes the command. + * + * CMD_DESC_FIXED: The command has a fixed length if this is set, + * a length mask if not set + * CMD_DESC_SKIP: The command is allowed but does not follow the + * standard length encoding for the opcode range in + * which it falls + * CMD_DESC_REJECT: The command is never allowed + * CMD_DESC_REGISTER: The command should be checked against the + * register whitelist for the appropriate ring + */ + u32 flags; +#define CMD_DESC_FIXED (1<<0) +#define CMD_DESC_SKIP (1<<1) +#define CMD_DESC_REJECT (1<<2) +#define CMD_DESC_REGISTER (1<<3) +#define CMD_DESC_BITMASK (1<<4) + + /* + * The command's unique identification bits and the bitmask to get them. + * This isn't strictly the opcode field as defined in the spec and may + * also include type, subtype, and/or subop fields. + */ + struct { + u32 value; + u32 mask; + } cmd; + + /* + * The command's length. The command is either fixed length (i.e. does + * not include a length field) or has a length field mask. The flag + * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has + * a length mask. All command entries in a command table must include + * length information. 
+ */ + union { + u32 fixed; + u32 mask; + } length; + + /* + * Describes where to find a register address in the command to check + * against the ring's register whitelist. Only valid if flags has the + * CMD_DESC_REGISTER bit set. + * + * A non-zero step value implies that the command may access multiple + * registers in sequence (e.g. LRI), in that case step gives the + * distance in dwords between individual offset fields. + */ + struct { + u32 offset; + u32 mask; + u32 step; + } reg; + +#define MAX_CMD_DESC_BITMASKS 3 + /* + * Describes command checks where a particular dword is masked and + * compared against an expected value. If the command does not match + * the expected value, the parser rejects it. Only valid if flags has + * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero + * are valid. + * + * If the check specifies a non-zero condition_mask then the parser + * only performs the check when the bits specified by condition_mask + * are non-zero. + */ + struct { + u32 offset; + u32 mask; + u32 expected; + u32 condition_offset; + u32 condition_mask; + } bits[MAX_CMD_DESC_BITMASKS]; +}; + +/* + * A table of commands requiring special handling by the command parser. + * + * Each engine has an array of tables. Each table consists of an array of + * command descriptors, which must be sorted with command opcodes in + * ascending order. + */ +struct drm_i915_cmd_table { + const struct drm_i915_cmd_descriptor *table; + int count; +}; + #define STD_MI_OPCODE_SHIFT (32 - 9) #define STD_3D_OPCODE_SHIFT (32 - 16) #define STD_2D_OPCODE_SHIFT (32 - 10) @@ -95,7 +187,7 @@ #define CMD(op, opm, f, lm, fl, ...) \ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), ~0u << (opm) }, \ + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } @@ -110,14 +202,13 @@ #define R CMD_DESC_REJECT #define W CMD_DESC_REGISTER #define B CMD_DESC_BITMASK -#define M CMD_DESC_MASTER /* Command Mask Fixed Len Action ---------------------------------------------------------- */ -static const struct drm_i915_cmd_descriptor common_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = { CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ), CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), @@ -147,7 +238,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), }; -static const struct drm_i915_cmd_descriptor render_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = { CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_PREDICATE, SMI, F, 1, S ), @@ -214,7 +305,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ), - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), @@ -231,7 +322,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), }; -static const struct drm_i915_cmd_descriptor video_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = { CMD( 
MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -275,7 +366,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = { CMD( MFX_WAIT, SMFX, F, 1, S ), }; -static const struct drm_i915_cmd_descriptor vecs_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -313,7 +404,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = { }}, ), }; -static const struct drm_i915_cmd_descriptor blt_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = { CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, .bits = {{ @@ -347,10 +438,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = { }; static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +/* + * For Gen9 we can still rely on the h/w to enforce cmd security, and only + * need to re-enforce the register access checks. We therefore only need to + * teach the cmdparser how to find the end of each command, and identify + * register accesses. The table doesn't need to reject any commands, and so + * the only commands listed here are: + * 1) Those that touch registers + * 2) Those that do not have the default 8-bit length + * + * Note that the default MI length mask chosen for this table is 0xFF, not + * the 0x3F used on older devices. This is because the vast majority of MI + * cmds on Gen9 use a standard 8-bit Length field. + * All the Gen9 blitter instructions are standard 0xFF length mask, and + * none allow access to non-general registers, so in fact no BLT cmds are + * included in the table at all. + * + */ +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = { + CMD( MI_NOOP, SMI, F, 1, S ), + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ), + CMD( MI_FLUSH, SMI, F, 1, S ), + CMD( MI_ARB_CHECK, SMI, F, 1, S ), + CMD( MI_REPORT_HEAD, SMI, F, 1, S ), + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ), + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ), + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ), + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ), + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), + + /* + * We allow BB_START but apply further checks. We just sanitize the + * basic fields here. 
+ */ +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1) + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B, + .bits = {{ + .offset = 0, + .mask = MI_BB_START_OPERAND_MASK, + .expected = MI_BB_START_OPERAND_EXPECT, + }}, ), +}; + static const struct drm_i915_cmd_descriptor noop_desc = CMD(MI_NOOP, SMI, F, 1, S); @@ -364,40 +509,44 @@ static const struct drm_i915_cmd_descriptor noop_desc = #undef R #undef W #undef B -#undef M -static const struct drm_i915_cmd_table gen7_render_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, }; -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, }; -static const struct drm_i915_cmd_table gen7_video_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { video_cmds, ARRAY_SIZE(video_cmds) }, +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) }, }; -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { vecs_cmds, ARRAY_SIZE(vecs_cmds) }, +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) }, }; -static const struct drm_i915_cmd_table gen7_blt_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, }; -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, }; +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = { + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) }, +}; + + /* * Register whitelists, sorted by increasing register offset. */ @@ -450,7 +599,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), - REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. 
*/ REG64(MI_PREDICATE_SRC0), REG64(MI_PREDICATE_SRC1), REG32(GEN7_3DPRIM_END_OFFSET), @@ -514,17 +662,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; -static const struct drm_i915_reg_descriptor ivb_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), -}; - -static const struct drm_i915_reg_descriptor hsw_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), + REG64_IDX(BCS_GPR, 3), + REG64_IDX(BCS_GPR, 4), + REG64_IDX(BCS_GPR, 5), + REG64_IDX(BCS_GPR, 6), + REG64_IDX(BCS_GPR, 7), + REG64_IDX(BCS_GPR, 8), + REG64_IDX(BCS_GPR, 9), + REG64_IDX(BCS_GPR, 10), + REG64_IDX(BCS_GPR, 11), + REG64_IDX(BCS_GPR, 12), + REG64_IDX(BCS_GPR, 13), + REG64_IDX(BCS_GPR, 14), + REG64_IDX(BCS_GPR, 15), }; #undef REG64 @@ -533,33 +691,32 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { struct drm_i915_reg_table { const struct drm_i915_reg_descriptor *regs; int num_regs; - bool master; }; static const struct drm_i915_reg_table ivb_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, }; static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, }; static const struct drm_i915_reg_table hsw_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) }, }; static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, +}; + +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = { + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) }, }; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; u32 subclient = (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT; @@ -578,7 +735,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; u32 subclient = (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT; u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT; @@ -601,7 +758,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header) static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; if (client == INSTR_MI_CLIENT) return 0x3F; @@ -612,6 +769,17 @@ static u32 
gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header) +{ + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; + + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) + return 0xFF; + + DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header); + return 0; +} + static bool validate_cmds_sorted(const struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) @@ -703,22 +871,15 @@ struct cmd_node { */ static inline u32 cmd_header_key(u32 x) { - u32 shift; - switch (x >> INSTR_CLIENT_SHIFT) { default: case INSTR_MI_CLIENT: - shift = STD_MI_OPCODE_SHIFT; - break; + return x >> STD_MI_OPCODE_SHIFT; case INSTR_RC_CLIENT: - shift = STD_3D_OPCODE_SHIFT; - break; + return x >> STD_3D_OPCODE_SHIFT; case INSTR_BC_CLIENT: - shift = STD_2D_OPCODE_SHIFT; - break; + return x >> STD_2D_OPCODE_SHIFT; } - - return x >> shift; } static int init_hash_table(struct intel_engine_cs *engine, @@ -776,18 +937,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->i915)) + if (!IS_GEN7(engine->i915) && !(IS_GEN9(engine->i915) && + engine->id == BCS)) return; switch (engine->id) { case RCS: if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_render_ring_cmds; + cmd_tables = hsw_render_ring_cmd_table; cmd_table_count = - ARRAY_SIZE(hsw_render_ring_cmds); + ARRAY_SIZE(hsw_render_ring_cmd_table); } else { - cmd_tables = gen7_render_cmds; - cmd_table_count = ARRAY_SIZE(gen7_render_cmds); + cmd_tables = gen7_render_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table); } if (IS_HASWELL(engine->i915)) { @@ -797,36 +959,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->reg_tables = ivb_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables); } - engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VCS: - cmd_tables = gen7_video_cmds; - cmd_table_count = ARRAY_SIZE(gen7_video_cmds); + cmd_tables = gen7_video_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_blt_ring_cmds; - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + if (IS_GEN9(engine->i915)) { + cmd_tables = gen9_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table); + engine->get_cmd_length_mask = + gen9_blt_get_cmd_length_mask; + + /* BCS Engine unsafe without parser */ + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER; + } else if (IS_HASWELL(engine->i915)) { + cmd_tables = hsw_blt_ring_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table); } else { - cmd_tables = gen7_blt_cmds; - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); + cmd_tables = gen7_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table); } - if (IS_HASWELL(engine->i915)) { + if (IS_GEN9(engine->i915)) { + engine->reg_tables = gen9_blt_reg_tables; + engine->reg_table_count = + ARRAY_SIZE(gen9_blt_reg_tables); + } else if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { engine->reg_tables = ivb_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VECS: - cmd_tables = hsw_vebox_cmds; - cmd_table_count = 
ARRAY_SIZE(hsw_vebox_cmds); + cmd_tables = hsw_vebox_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; @@ -852,7 +1024,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) return; } - engine->needs_cmd_parser = true; + engine->flags |= I915_ENGINE_USING_CMD_PARSER; } /** @@ -864,7 +1036,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) */ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { - if (!engine->needs_cmd_parser) + if (!intel_engine_using_cmd_parser(engine)) return; fini_hash_table(engine); @@ -938,22 +1110,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) } static const struct drm_i915_reg_descriptor * -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, u32 addr) { const struct drm_i915_reg_table *table = engine->reg_tables; + const struct drm_i915_reg_descriptor *reg = NULL; int count = engine->reg_table_count; - do { - if (!table->master || is_master) { - const struct drm_i915_reg_descriptor *reg; - - reg = __find_reg(table->regs, table->num_regs, addr); - if (reg != NULL) - return reg; - } - } while (table++, --count); + for (; !reg && (count > 0); ++table, --count) + reg = __find_reg(table->regs, table->num_regs, addr); - return NULL; + return reg; } /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ @@ -1036,32 +1202,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, return dst; } -/** - * intel_engine_needs_cmd_parser() - should a given engine use software - * command parsing? - * @engine: the engine in question - * - * Only certain platforms require software batch buffer command parsing, and - * only when enabled via module parameter. - * - * Return: true if the engine requires software command parsing - */ -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine) -{ - if (!engine->needs_cmd_parser) - return false; - - if (!USES_PPGTT(engine->i915)) - return false; - - return (i915.enable_cmd_parser == 1); -} - static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, - const u32 *cmd, u32 length, - const bool is_master, - bool *oacontrol_set) + const u32 *cmd, u32 length) { if (desc->flags & CMD_DESC_SKIP) return true; @@ -1071,12 +1214,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - if ((desc->flags & CMD_DESC_MASTER) && !is_master) { - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", - *cmd); - return false; - } - if (desc->flags & CMD_DESC_REGISTER) { /* * Get the distance between individual register offset @@ -1090,7 +1227,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(engine, is_master, reg_addr); + find_reg(engine, reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n", @@ -1098,31 +1235,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - /* - * OACONTROL requires some special handling for - * writes. We want to make sure that any batch which - * enables OA also disables it before the end of the - * batch. The goal is to prevent one process from - * snooping on the perf data from another process. 
To do - * that, we need to check the value that will be written - * to the register. Hence, limit OACONTROL writes to - * only MI_LOAD_REGISTER_IMM commands. - */ - if (reg_addr == i915_mmio_reg_offset(OACONTROL)) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { - DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); - return false; - } - - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { - DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n"); - return false; - } - - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) - *oacontrol_set = (cmd[offset + 1] != 0); - } - /* * Check the value written to the register against the * allowed mask/value pair given in the whitelist entry. @@ -1170,6 +1282,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, continue; } + if (desc->bits[i].offset >= length) { + DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n", + *cmd, engine->name); + return false; + } + dword = cmd[desc->bits[i].offset] & desc->bits[i].mask; @@ -1187,16 +1305,112 @@ static bool check_cmd(const struct intel_engine_cs *engine, return true; } +static int check_bbstart(const struct i915_gem_context *ctx, + u32 *cmd, u32 offset, u32 length, + u32 batch_len, + u64 batch_start, + u64 shadow_batch_start) +{ + u64 jump_offset, jump_target; + u32 target_cmd_offset, target_cmd_index; + + /* For igt compatibility on older platforms */ + if (CMDPARSER_USES_GGTT(ctx->i915)) { + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); + return -EACCES; + } + + if (length != 3) { + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n", + length); + return -EINVAL; + } + + jump_target = *(u64*)(cmd+1); + jump_offset = jump_target - batch_start; + + /* + * Any underflow of jump_target is guaranteed to be outside the range + * of a u32, so >= test catches both too large and too small + */ + if (jump_offset >= batch_len) { + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n", + jump_target); + return -EINVAL; + } + + /* + * This cannot overflow a u32 because we already checked jump_offset + * is within the BB, and the batch_len is a u32 + */ + target_cmd_offset = lower_32_bits(jump_offset); + target_cmd_index = target_cmd_offset / sizeof(u32); + + *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset; + + if (target_cmd_index == offset) + return 0; + + if (ctx->jump_whitelist_cmds <= target_cmd_index) { + DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n"); + return -EINVAL; + } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) { + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", + jump_target); + return -EINVAL; + } + + return 0; +} + +static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len) +{ + const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32)); + const u32 exact_size = BITS_TO_LONGS(batch_cmds); + u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds)); + unsigned long *next_whitelist; + + if (CMDPARSER_USES_GGTT(ctx->i915)) + return; + + if (batch_cmds <= ctx->jump_whitelist_cmds) { + bitmap_zero(ctx->jump_whitelist, batch_cmds); + return; + } + +again: + next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL); + if (next_whitelist) { + kfree(ctx->jump_whitelist); + ctx->jump_whitelist = next_whitelist; + ctx->jump_whitelist_cmds = + next_size * BITS_PER_BYTE * sizeof(long); + return; + } + + if (next_size > exact_size) { + next_size = exact_size; + goto again; + } + + DRM_DEBUG("CMD: Failed to extend whitelist. 
BB_START may be disallowed\n"); + bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds); + + return; +} + #define LENGTH_BIAS 2 /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations + * @ctx: the context in which the batch is to execute * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question - * @shadow_batch_obj: copy of the batch buffer in question + * @batch_start: Canonical base address of batch * @batch_start_offset: byte offset in the batch at which execution starts * @batch_len: length of the commands in batch_obj - * @is_master: is the submitting process the drm master? + * @shadow_batch_obj: copy of the batch buffer in question + * @shadow_batch_start: Canonical base address of shadow_batch_obj * * Parses the specified batch buffer looking for privilege violations as * described in the overview. @@ -1204,17 +1418,19 @@ static bool check_cmd(const struct intel_engine_cs *engine, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int intel_engine_cmd_parser(struct intel_engine_cs *engine, + +int intel_engine_cmd_parser(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master) + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start) { - u32 *cmd, *batch_end; + u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; - bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ bool needs_clflush_after = false; int ret = 0; @@ -1226,13 +1442,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, return PTR_ERR(cmd); } + init_whitelist(ctx, batch_len); + /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way. */ batch_end = cmd + (batch_len / sizeof(*batch_end)); - while (cmd < batch_end) { + do { u32 length; if (*cmd == MI_BATCH_BUFFER_END) @@ -1243,17 +1461,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); ret = -EINVAL; - break; - } - - /* - * If the batch buffer contains a chained batch, return an - * error that tells the caller to abort and dispatch the - * workload as a non-secure batch. 
- */ - if (desc->cmd.value == MI_BATCH_BUFFER_START) { - ret = -EACCES; - break; + goto err; } if (desc->flags & CMD_DESC_FIXED) @@ -1267,32 +1475,44 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, length, batch_end - cmd); ret = -EINVAL; - break; + goto err; } - if (!check_cmd(engine, desc, cmd, length, is_master, - &oacontrol_set)) { - ret = -EINVAL; + if (!check_cmd(engine, desc, cmd, length)) { + ret = -EACCES; + goto err; + } + + if (desc->cmd.value == MI_BATCH_BUFFER_START) { + ret = check_bbstart(ctx, cmd, offset, length, + batch_len, batch_start, + shadow_batch_start); + + if (ret) + goto err; break; } - cmd += length; - } + if (ctx->jump_whitelist_cmds > offset) + set_bit(offset, ctx->jump_whitelist); - if (oacontrol_set) { - DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n"); - ret = -EINVAL; - } + cmd += length; + offset += length; + if (cmd >= batch_end) { + DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); + ret = -EINVAL; + goto err; + } + } while (1); - if (cmd >= batch_end) { - DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); - ret = -EINVAL; + if (needs_clflush_after) { + void *ptr = ptr_mask_bits(shadow_batch_obj->mapping); + drm_clflush_virt_range(ptr, + (void *)(cmd + 1) - ptr); } - if (ret == 0 && needs_clflush_after) - drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len); +err: i915_gem_object_unpin_map(shadow_batch_obj); - return ret; } @@ -1312,7 +1532,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_engine(engine, dev_priv) { - if (intel_engine_needs_cmd_parser(engine)) { + if (intel_engine_using_cmd_parser(engine)) { active = true; break; } @@ -1332,6 +1552,12 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * 5. GPGPU dispatch compute indirect registers. * 6. TIMESTAMP register and Haswell CS GPR registers * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers. + * 8. Don't report cmd_check() failures as EINVAL errors to userspace; + * rely on the HW to NOOP disallowed commands as it would without + * the parser enabled. + * 9. Don't whitelist or handle oacontrol specially, as ownership + * for oacontrol state is moving to i915-perf. + * 10. 
Support for Gen9 BCS Parsing */ - return 7; + return 10; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bae62cf934cff2229b1efad2dfc7b588dcccf9d5..ff61229d963b20dd611c9f2c3adcd11205701619 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -280,7 +280,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = i915.semaphores; break; case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); + value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN); break; case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(dev_priv); @@ -1470,6 +1470,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) disable_rpm_wakeref_asserts(dev_priv); intel_display_set_init_power(dev_priv, false); + i915_rc6_ctx_wa_suspend(dev_priv); fw_csr = !IS_BROXTON(dev_priv) && suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; @@ -1706,6 +1707,8 @@ static int i915_drm_resume_early(struct drm_device *dev) else intel_display_set_init_power(dev_priv, true); + i915_rc6_ctx_wa_resume(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); out: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e23748cca0c0068ad8f50b93563c15d921561e9d..c4f155663ca9a6ef33cf9ac0e4479dda31be3d3a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -943,6 +943,13 @@ struct i915_gem_context { struct list_head link; u8 remap_slice; + + /** jump_whitelist: Bit array for tracking cmds during cmdparsing */ + unsigned long *jump_whitelist; + + /** jump_whitelist_cmds: No of cmd slots available */ + u32 jump_whitelist_cmds; + bool closed:1; }; @@ -1221,6 +1228,7 @@ struct intel_gen6_power_mgmt { bool client_boost; bool enabled; + bool ctx_corrupted; struct delayed_work autoenable_work; unsigned boosts; @@ -2339,6 +2347,18 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj) __deprecated extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); +static inline void +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) +{ + obj->base.vma_node.readonly = true; +} + +static inline bool +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) +{ + return obj->base.vma_node.readonly; +} + static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { @@ -2476,102 +2496,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) -/* - * A command that requires special handling by the command parser. - */ -struct drm_i915_cmd_descriptor { - /* - * Flags describing how the command parser processes the command. 
- * - * CMD_DESC_FIXED: The command has a fixed length if this is set, - * a length mask if not set - * CMD_DESC_SKIP: The command is allowed but does not follow the - * standard length encoding for the opcode range in - * which it falls - * CMD_DESC_REJECT: The command is never allowed - * CMD_DESC_REGISTER: The command should be checked against the - * register whitelist for the appropriate ring - * CMD_DESC_MASTER: The command is allowed if the submitting process - * is the DRM master - */ - u32 flags; -#define CMD_DESC_FIXED (1<<0) -#define CMD_DESC_SKIP (1<<1) -#define CMD_DESC_REJECT (1<<2) -#define CMD_DESC_REGISTER (1<<3) -#define CMD_DESC_BITMASK (1<<4) -#define CMD_DESC_MASTER (1<<5) - - /* - * The command's unique identification bits and the bitmask to get them. - * This isn't strictly the opcode field as defined in the spec and may - * also include type, subtype, and/or subop fields. - */ - struct { - u32 value; - u32 mask; - } cmd; - - /* - * The command's length. The command is either fixed length (i.e. does - * not include a length field) or has a length field mask. The flag - * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has - * a length mask. All command entries in a command table must include - * length information. - */ - union { - u32 fixed; - u32 mask; - } length; - - /* - * Describes where to find a register address in the command to check - * against the ring's register whitelist. Only valid if flags has the - * CMD_DESC_REGISTER bit set. - * - * A non-zero step value implies that the command may access multiple - * registers in sequence (e.g. LRI), in that case step gives the - * distance in dwords between individual offset fields. - */ - struct { - u32 offset; - u32 mask; - u32 step; - } reg; - -#define MAX_CMD_DESC_BITMASKS 3 - /* - * Describes command checks where a particular dword is masked and - * compared against an expected value. If the command does not match - * the expected value, the parser rejects it. Only valid if flags has - * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero - * are valid. - * - * If the check specifies a non-zero condition_mask then the parser - * only performs the check when the bits specified by condition_mask - * are non-zero. - */ - struct { - u32 offset; - u32 mask; - u32 expected; - u32 condition_offset; - u32 condition_mask; - } bits[MAX_CMD_DESC_BITMASKS]; -}; - -/* - * A table of commands requiring special handling by the command parser. - * - * Each engine has an array of tables. Each table consists of an array of - * command descriptors, which must be sorted with command opcodes in - * ascending order. - */ -struct drm_i915_cmd_table { - const struct drm_i915_cmd_descriptor *table; - int count; -}; - /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
*/ #define __I915__(p) ({ \ struct drm_i915_private *__p; \ @@ -2729,6 +2653,12 @@ struct drm_i915_cmd_table { #define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7))) #define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8))) +/* + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution + * All later gens can run the final buffer from the ppgtt + */ +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv) + #define ENGINE_MASK(id) BIT(id) #define RENDER_RING ENGINE_MASK(RCS) #define BSD_RING ENGINE_MASK(VCS) @@ -2745,6 +2675,8 @@ struct drm_i915_cmd_table { #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) + #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) #define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) @@ -2764,11 +2696,13 @@ struct drm_i915_cmd_table { /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ + (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9) + /* WaRsDisableCoarsePowerGating:skl,bxt */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ - IS_SKL_GT3(dev_priv) || \ - IS_SKL_GT4(dev_priv)) + (INTEL_GEN(dev_priv) == 9)) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts @@ -3098,6 +3032,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); +struct i915_vma * __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags); + int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); @@ -3551,13 +3493,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, +int intel_engine_cmd_parser(struct i915_gem_context *cxt, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 user_batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master); + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 26c4befcd234252294663c035a1a4fad5ed7d2ac..3fb4f9acacba014860af27d61239cb4f35191ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1773,6 +1773,10 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) unsigned int flags; int ret; + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >> PAGE_SHIFT; @@ -2759,6 +2763,12 @@ i915_gem_idle_work_handler(struct 
work_struct *work) if (INTEL_GEN(dev_priv) >= 6) gen6_rps_idle(dev_priv); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) { + i915_rc6_ctx_wa_check(dev_priv); + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + } + intel_runtime_pm_put(dev_priv); out_unlock: mutex_unlock(&dev->struct_mutex); @@ -3822,6 +3832,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 flags) { struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base; + + return i915_gem_object_pin(obj, vm, view, size, alignment, + flags | PIN_GLOBAL); +} + +struct i915_vma * +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags) +{ struct i915_vma *vma; int ret; @@ -3846,7 +3869,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin(vma, size, alignment, flags); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index df10f4e95736be96184027a7fd632523b493d1cc..5d55cd159e898351a2c298eb306642f2941c70ef 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -158,6 +158,8 @@ void i915_gem_context_free(struct kref *ctx_ref) i915_vma_put(ce->state); } + kfree(ctx->jump_whitelist); + put_pid(ctx->pid); list_del(&ctx->link); @@ -327,6 +329,9 @@ __create_hw_context(struct drm_device *dev, GEN8_CTX_ADDRESSING_MODE_SHIFT; ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); + ctx->jump_whitelist = NULL; + ctx->jump_whitelist_cmds = 0; + return ctx; err_out: diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2117f172d7a2b46d849f062b49952bc124fb4e5c..4548d89abcdc3035e35eb71167bf256a3dbb8a13 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -55,6 +55,7 @@ struct i915_execbuffer_params { struct i915_vma *batch; u32 dispatch_flags; u32 args_batch_start_offset; + u64 args_batch_len; struct intel_engine_cs *engine; struct i915_gem_context *ctx; struct drm_i915_gem_request *request; @@ -1401,41 +1402,85 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) return 0; } +static struct i915_vma* +shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + u64 flags; + + /* + * PPGTT backed shadow buffers must be mapped RO, to prevent + * post-scan tampering + */ + if (CMDPARSER_USES_GGTT(dev_priv)) { + flags = PIN_GLOBAL; + vm = &dev_priv->ggtt.base; + } else if (vm->has_read_only) { + flags = PIN_USER; + i915_gem_object_set_readonly(obj); + } else { + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + return ERR_PTR(-EINVAL); + } + + return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags); +} + static struct i915_vma * i915_gem_execbuffer_parse(struct intel_engine_cs *engine, struct drm_i915_gem_exec_object2 *shadow_exec_entry, - struct drm_i915_gem_object *batch_obj, + struct i915_execbuffer_params *params, struct eb_vmas *eb, - u32 batch_start_offset, - u32 batch_len, - bool is_master) + struct i915_address_space *vm) { + struct drm_i915_gem_object *batch_obj = params->batch->obj; struct drm_i915_gem_object *shadow_batch_obj; struct i915_vma *vma; + u64 batch_start; + u32 batch_start_offset = params->args_batch_start_offset; + u32 batch_len = 
params->args_batch_len; + u64 shadow_batch_start; int ret; + shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool, PAGE_ALIGN(batch_len)); if (IS_ERR(shadow_batch_obj)) return ERR_CAST(shadow_batch_obj); - ret = intel_engine_cmd_parser(engine, + vma = shadow_batch_pin(shadow_batch_obj, vm); + if (IS_ERR(vma)) + goto out; + + batch_start = gen8_canonical_addr(params->batch->node.start) + + batch_start_offset; + shadow_batch_start = gen8_canonical_addr(vma->node.start); + + ret = intel_engine_cmd_parser(params->ctx, + engine, batch_obj, - shadow_batch_obj, + batch_start, batch_start_offset, batch_len, - is_master); + shadow_batch_obj, + shadow_batch_start); if (ret) { - if (ret == -EACCES) /* unhandled chained batch */ + i915_vma_unpin(vma); + + /* + * Unsafe GGTT-backed buffers can still be submitted safely + * as non-secure. + * For PPGTT backing however, we have no choice but to forcibly + * reject unsafe buffers + */ + if (CMDPARSER_USES_GGTT(eb->i915) && (ret == -EACCES)) + /* Execute original buffer non-secure */ vma = NULL; else vma = ERR_PTR(ret); - goto out; - } - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) goto out; + } memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); @@ -1476,13 +1521,10 @@ execbuf_submit(struct i915_execbuffer_params *params, return ret; } - exec_len = args->batch_len; + exec_len = params->args_batch_len; exec_start = params->batch->node.start + params->args_batch_start_offset; - if (exec_len == 0) - exec_len = params->batch->size - params->args_batch_start_offset; - ret = params->engine->emit_bb_start(params->request, exec_start, exec_len, params->dispatch_flags); @@ -1601,8 +1643,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dispatch_flags = 0; if (args->flags & I915_EXEC_SECURE) { + if (INTEL_GEN(dev_priv) >= 11) + return -ENODEV; + + /* Return -EPERM to trigger fallback code on old binaries. */ + if (!HAS_SECURE_BATCHES(dev_priv)) + return -EPERM; + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; dispatch_flags |= I915_DISPATCH_SECURE; } @@ -1710,32 +1759,26 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } + params->ctx = ctx; params->args_batch_start_offset = args->batch_start_offset; - if (intel_engine_needs_cmd_parser(engine) && args->batch_len) { + params->args_batch_len = args->batch_len; + if (args->batch_len == 0) + params->args_batch_len = params->batch->size - params->args_batch_start_offset; + + if (intel_engine_requires_cmd_parser(engine) || + (intel_engine_using_cmd_parser(engine) && args->batch_len)) { struct i915_vma *vma; vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, - params->batch->obj, - eb, - args->batch_start_offset, - args->batch_len, - drm_is_current_master(file)); + params, eb, vm); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; } if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. 
- */ - dispatch_flags |= I915_DISPATCH_SECURE; + if (CMDPARSER_USES_GGTT(dev_priv)) + dispatch_flags |= I915_DISPATCH_SECURE; params->args_batch_start_offset = 0; params->batch = vma; } @@ -1798,7 +1841,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->file = file; params->engine = engine; params->dispatch_flags = dispatch_flags; - params->ctx = ctx; ret = execbuf_submit(params, args, &eb->vmas); err_request: diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0bb4232f66bcac1ad9653c073c1cbe7cb7375c16..16f56f14f4d060013cde31bfc010edd6867b0177 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -140,7 +140,8 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; - if (enable_ppgtt == 1) + /* Full PPGTT is required by the Gen9 cmdparser */ + if (enable_ppgtt == 1 && INTEL_GEN(dev_priv) != 9) return 1; if (enable_ppgtt == 2 && has_full_ppgtt) @@ -177,8 +178,8 @@ static int ppgtt_bind_vma(struct i915_vma *vma, vma->pages = vma->obj->pages; - /* Currently applicable only to VLV */ - if (vma->obj->gt_ro) + /* Applicable to VLV, and gen8+ */ + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, @@ -197,11 +198,14 @@ static void ppgtt_unbind_vma(struct i915_vma *vma) static gen8_pte_t gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 flags) { gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; pte |= addr; + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; + switch (level) { case I915_CACHE_NONE: pte |= PPAT_UNCACHED_INDEX; @@ -472,7 +476,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm, gen8_pte_t scratch_pte; scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, true); + I915_CACHE_LLC, true, 0); fill_px(vm->dev, pt, scratch_pte); } @@ -769,7 +773,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, use_scratch); + I915_CACHE_LLC, use_scratch, 0); if (!USES_FULL_48BIT_PPGTT(vm->dev)) { gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, @@ -790,7 +794,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, struct i915_page_directory_pointer *pdp, struct sg_page_iter *sg_iter, uint64_t start, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen8_pte_t *pt_vaddr; @@ -809,7 +814,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, pt_vaddr[pte] = gen8_pte_encode(sg_page_iter_dma_address(sg_iter), - cache_level, true); + cache_level, true, flags); if (++pte == GEN8_PTES) { kunmap_px(ppgtt, pt_vaddr); pt_vaddr = NULL; @@ -830,7 +835,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sg_page_iter sg_iter; @@ -839,7 +844,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, if (!USES_FULL_48BIT_PPGTT(vm->dev)) { gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, - cache_level); + cache_level, flags); } else { struct i915_page_directory_pointer *pdp; uint64_t pml4e; @@ 
-847,7 +852,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, - start, cache_level); + start, cache_level, flags); } } } @@ -1452,7 +1457,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) uint64_t start = ppgtt->base.start; uint64_t length = ppgtt->base.total; gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, true); + I915_CACHE_LLC, true, 0); if (!USES_FULL_48BIT_PPGTT(vm->dev)) { gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); @@ -1520,6 +1525,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.unbind_vma = ppgtt_unbind_vma; ppgtt->base.bind_vma = ppgtt_bind_vma; + + /* + * From bdw, there is support for read-only pages in the PPGTT. + * + * XXX GVT is not honouring the lack of RW in the PTE bits. + */ + ppgtt->base.has_read_only = !intel_vgpu_active(to_i915(ppgtt->base.dev)); + ppgtt->debug_dump = gen8_dump_ppgtt; if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { @@ -2321,7 +2334,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - gen8_set_pte(pte, gen8_pte_encode(addr, level, true)); + gen8_set_pte(pte, gen8_pte_encode(addr, level, true, 0)); I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); @@ -2332,7 +2345,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level level, u32 unused) + enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); @@ -2343,12 +2356,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, int rpm_atomic_seq; int i = 0; + /* The GTT does not support read-only mappings */ + GEM_BUG_ON(flags & PTE_READ_ONLY); + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. 
+ */ + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); for_each_sgt_dma(addr, sgt_iter, st) { - gtt_entry = gen8_pte_encode(addr, level, true); + gtt_entry = gen8_pte_encode(addr, level, true, 0); gen8_set_pte(>t_entries[i++], gtt_entry); } @@ -2499,7 +2520,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, - use_scratch); + use_scratch, 0); for (i = 0; i < num_entries; i++) gen8_set_pte(>t_base[i], scratch_pte); readl(gtt_base); @@ -2604,8 +2625,8 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (ret) return ret; - /* Currently applicable only to VLV */ - if (obj->gt_ro) + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, @@ -2634,7 +2655,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, /* Currently applicable only to VLV */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; @@ -3193,6 +3214,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) ggtt->base.total -= PAGE_SIZE; i915_address_space_init(&ggtt->base, dev_priv); ggtt->base.total += PAGE_SIZE; + + /* Only VLV supports read-only GGTT mappings */ + ggtt->base.has_read_only = IS_VALLEYVIEW(dev_priv); + if (!HAS_LLC(dev_priv)) ggtt->base.mm.color_adjust = i915_gtt_color_adjust; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index ec78be2f8c7727c483c751f0dddb92c7aeb90531..43a0192242eb46efd2cbfc28e8dedbe47e3c9dd7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -392,6 +392,9 @@ struct i915_address_space { */ struct list_head unbound_list; + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; + /* FIXME: Need a more generic return type */ gen6_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 8832f8ec158342ac305b603e0bd30d4353687fe3..f597261c264f4569e4b0063036a1825b86d3f1c0 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -558,6 +558,10 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine) return; intel_runtime_pm_get_noresume(dev_priv); + + if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + dev_priv->gt.awake = true; intel_enable_gt_powersave(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 768ad89d9cd40cf93093713f7db08f7bc10e7ebd..9d9dfe194b9b188750f32e9864fac50abdee2c91 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -49,7 +49,7 @@ struct i915_params i915 __read_mostly = { .reset = true, .invert_brightness = 0, .disable_display = 0, - .enable_cmd_parser = 1, + .enable_cmd_parser = true, .use_mmio_flip = 0, .mmio_debug = 0, .verbose_state_checks = 1, @@ -178,9 +178,9 @@ MODULE_PARM_DESC(invert_brightness, module_param_named(disable_display, i915.disable_display, bool, 0400); MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); +module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); MODULE_PARM_DESC(enable_cmd_parser, - "Enable 
command parsing (1=enabled [default], 0=disabled)"); + "Enable command parsing (true=enabled [default], false=disabled)"); module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); MODULE_PARM_DESC(use_mmio_flip, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 3a0dd78ddb38f88fa7b447a51702f3539c0590d1..82ac6e886eed227f2bac7ee63610913f7a1e3320 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -44,7 +44,6 @@ struct i915_params { int disable_power_well; int enable_ips; int invert_brightness; - int enable_cmd_parser; int enable_guc_loading; int enable_guc_submission; int guc_log_level; @@ -53,6 +52,7 @@ struct i915_params { int edp_vswing; unsigned int inject_load_failure; /* leave bools at the end to not create holes */ + bool enable_cmd_parser; bool enable_hangcheck; bool fastboot; bool prefault_disable; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 70d96162def66daef0722a0fb788e23da2e47ec8..5468e69bf520a0707dda3e2f149a3e6ff3b46619 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -223,6 +223,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_CONFIG0 _MMIO(0xD00) #define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1) +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1<<13) #define ECOBITS_PPGTT_CACHE64B (3<<8) @@ -295,7 +297,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * Instruction field definitions used by the command parser */ #define INSTR_CLIENT_SHIFT 29 -#define INSTR_CLIENT_MASK 0xE0000000 #define INSTR_MI_CLIENT 0x0 #define INSTR_BC_CLIENT 0x2 #define INSTR_RC_CLIENT 0x3 @@ -569,6 +570,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define BCS_SWCTRL _MMIO(0x22200) +/* There are 16 GPR registers */ +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) + #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -5936,6 +5941,10 @@ enum { #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C) #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) +/* Display Internal Timeout Register */ +#define RM_TIMEOUT _MMIO(0x42060) +#define MMIO_TIMEOUT_US(us) ((us) << 0) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8aafb9601540275b987b3e67a52980e5fbc66e0d..b3af565b702758defa572e84148080b477013eb5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1730,6 +1730,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915); +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 
05427d2924574fd23d18f7167f1ae1a3846fd332..07d2a8e7f78c374b402a6b88344b5f4bb5ad689a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -105,6 +105,13 @@ static void bxt_init_clock_gating(struct drm_device *dev) if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); + /* + * Lower the display internal timeout. + * This is needed to avoid any hard hangs when DSI port PLL + * is off and a MMIO access is attempted by any privilege + * application, using batch buffers or any other means. + */ + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); } static void i915_pineview_get_mem_freq(struct drm_device *dev) @@ -5149,19 +5156,23 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_i915_private *dev_priv) +static void gen6_disable_rc6(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RC_CONTROL, 0); +} + +static void gen6_disable_rps(struct drm_i915_private *dev_priv) +{ I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 0); } -static void cherryview_disable_rps(struct drm_i915_private *dev_priv) +static void cherryview_disable_rc6(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_i915_private *dev_priv) +static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) { /* we're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ @@ -5426,7 +5437,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6() & INTEL_RC6_ENABLE) + if (!dev_priv->rps.ctx_corrupted && + intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ @@ -5484,7 +5496,8 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6() & INTEL_RC6_ENABLE) + if (!dev_priv->rps.ctx_corrupted && + intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; intel_print_rc6_info(dev_priv, rc6_mask); if (IS_BROADWELL(dev_priv)) @@ -6655,6 +6668,95 @@ static void intel_init_emon(struct drm_i915_private *dev_priv) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } +static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv) +{ + return !I915_READ(GEN8_RC6_CTX_INFO); +} + +static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + i915->rps.ctx_corrupted = true; + intel_runtime_pm_get(i915); + } +} + +static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915) +{ + if (i915->rps.ctx_corrupted) { + intel_runtime_pm_put(i915); + i915->rps.ctx_corrupted = false; + } +} + +/** + * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to clean up the RC6 CTX WA before system suspend. 
+ */ +void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915) +{ + if (i915->rps.ctx_corrupted) + intel_runtime_pm_put(i915); +} + +/** + * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @i915: i915 device + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. + */ +void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915) +{ + if (!i915->rps.ctx_corrupted) + return; + + if (i915_rc6_ctx_corrupted(i915)) { + intel_runtime_pm_get(i915); + return; + } + + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + i915->rps.ctx_corrupted = false; +} + +static void intel_disable_rc6(struct drm_i915_private *dev_priv); + +/** + * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @i915: i915 device + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. + * + * Return false if no context corruption has happened since the last call of + * this function, true otherwise. +*/ +bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915) +{ + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return false; + + if (i915->rps.ctx_corrupted) + return false; + + if (!i915_rc6_ctx_corrupted(i915)) + return false; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_disable_rc6(i915); + i915->rps.ctx_corrupted = true; + intel_runtime_pm_get_noresume(i915); + + return true; +} + void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { /* @@ -6669,6 +6771,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->rps.hw_lock); + i915_rc6_ctx_wa_init(dev_priv); + /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) cherryview_init_gt_powersave(dev_priv); @@ -6718,6 +6822,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); + i915_rc6_ctx_wa_cleanup(dev_priv); + if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } @@ -6749,27 +6855,47 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) gen6_reset_rps_interrupts(dev_priv); } -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) +static void __intel_disable_rc6(struct drm_i915_private *dev_priv) { - if (!READ_ONCE(dev_priv->rps.enabled)) - return; + if (INTEL_GEN(dev_priv) >= 9) + gen9_disable_rc6(dev_priv); + else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rc6(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rc6(dev_priv); + else if (INTEL_GEN(dev_priv) >= 6) + gen6_disable_rc6(dev_priv); +} +static void intel_disable_rc6(struct drm_i915_private *dev_priv) +{ mutex_lock(&dev_priv->rps.hw_lock); + __intel_disable_rc6(dev_priv); + mutex_unlock(&dev_priv->rps.hw_lock); +} - if (INTEL_GEN(dev_priv) >= 9) { - gen9_disable_rc6(dev_priv); +static void intel_disable_rps(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 9) gen9_disable_rps(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) { - cherryview_disable_rps(dev_priv); - } else if (IS_VALLEYVIEW(dev_priv)) { - valleyview_disable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 6) { + else if (INTEL_GEN(dev_priv) >= 6) gen6_disable_rps(dev_priv); - } else if (IS_IRONLAKE_M(dev_priv)) { + else if (IS_IRONLAKE_M(dev_priv)) ironlake_disable_drps(dev_priv); - } +} + +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) +{ + if 
(!READ_ONCE(dev_priv->rps.enabled)) + return; + + mutex_lock(&dev_priv->rps.hw_lock); + + __intel_disable_rc6(dev_priv); + intel_disable_rps(dev_priv); dev_priv->rps.enabled = false; + mutex_unlock(&dev_priv->rps.hw_lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8babfe0ce4e3d4ab8973b9434d1c889f11f55b41..29c3123840aea2fffe3f99a8e025566f8f635c8d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1951,6 +1951,7 @@ void intel_ring_unpin(struct intel_ring *ring) static struct i915_vma * intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { + struct i915_address_space *vm = &dev_priv->ggtt.base; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -1960,10 +1961,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) if (IS_ERR(obj)) return ERR_CAST(obj); - /* mark ring buffers as read-only from GPU side by default */ - obj->gt_ro = 1; + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. + */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); - vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + vma = i915_vma_create(obj, vm, NULL); if (IS_ERR(vma)) goto err; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ec0b4a0c605db2f1df88ff7e567cc51d196f2d19..ce14cd8495e8e717871c436f1ad0c9e1b47e958a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -341,7 +341,9 @@ struct intel_engine_cs { struct intel_engine_hangcheck hangcheck; - bool needs_cmd_parser; +#define I915_ENGINE_USING_CMD_PARSER BIT(0) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3) + unsigned int flags; /* * Table of commands the command parser needs to know about @@ -374,7 +376,19 @@ intel_engine_initialized(const struct intel_engine_cs *engine) return engine->i915 != NULL; } -static inline unsigned +static inline bool +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; +} + +static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { return 1 << engine->id; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 48dfc163233ec516a93118b80541759db92f7062..2865876079315d136357bdb86abfb2ce87fc449d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -423,12 +423,15 @@ static int mtk_drm_probe(struct platform_device *pdev) comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; + of_node_put(node); goto err_node; } ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); - if (ret) + if (ret) { + of_node_put(node); goto err_node; + } private->ddp_comp[comp_id] = comp; } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 5899fcf4689a523af831171a361c07e6b4441efc..ace0facf97f808552a6892c925bb6566275717eb 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -124,7 +124,7 @@ static ssize_t debugfs_state_info_read(struct file *file, if (len > count) len = count; - /* TODO: make sure that this does not exceed 4K */ + len = min_t(size_t, len, 
SZ_4K); if (copy_to_user(buff, buf, len)) { kfree(buf); return -EFAULT; @@ -182,7 +182,7 @@ static ssize_t debugfs_reg_dump_read(struct file *file, if (len > count) len = count; - /* TODO: make sure that this does not exceed 4K */ + len = min_t(size_t, len, SZ_4K); if (copy_to_user(buff, buf, len)) { kfree(buf); return -EFAULT; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c5e1aa5f1d8ea89789054bddd7b5c28f1d607c5f..efa875120071a094e6d366e2e8973ae7346ac578 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -764,7 +764,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_encoder->output_csc = val; - if (connector->encoder->crtc) { + if (connector->encoder && connector->encoder->crtc) { struct drm_crtc *crtc = connector->encoder->crtc; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index b82ef5ed727ca81ce518af207be037014bbacde6..ac7ae206f2e714a1d069fca2a61316b54de581ec 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 9deed4fac855a586324017a3e573fd2509ee68eb..ce9e55ed26e09244ebc9998a7c0fd4adfe2bcd4e 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -763,6 +763,14 @@ config HID_SPEEDLINK ---help--- Support for Speedlink Vicious and Divine Cezanne mouse. +config HID_STEAM + tristate "Steam Controller support" + depends on HID + ---help--- + Say Y here if you have a Steam Controller if you want to use it + without running the Steam Client. It supports both the wired and + the wireless adaptor. 
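
A minimal usage sketch, not part of the patch: once the HID_STEAM option added above is selected (HID itself must be y or m, per the depends line), the Makefile hunk below builds the driver as hid-steam.ko. A .config fragment enabling it as a module would look like:

    CONFIG_HID=y
    # hypothetical fragment; only CONFIG_HID_STEAM comes from this patch
    CONFIG_HID_STEAM=m
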
+ config HID_STEELSERIES tristate "Steelseries SRW-S1 steering wheel support" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 95c32355f70e15f12440b4d0e980cf8ae2985ed2..a11278bf30f65e444698ff94f02b06c3ec20a110 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o obj-$(CONFIG_HID_SONY) += hid-sony.o obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o +obj-$(CONFIG_HID_STEAM) += hid-steam.o obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index a8b8058245de1d3b7fa40542de5c616a32e01ddb..6d76e96a5166c63a3d1197386175213b0d5a281f 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -55,7 +55,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") struct apple_sc { unsigned long quirks; unsigned int fn_on; - DECLARE_BITMAP(pressed_fn, KEY_CNT); DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -182,6 +181,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, { struct apple_sc *asc = hid_get_drvdata(hid); const struct apple_key_translation *trans, *table; + bool do_translate; + u16 code = 0; if (usage->code == KEY_FN) { asc->fn_on = !!value; @@ -190,8 +191,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, } if (fnmode) { - int do_translate; - if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI && hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) table = macbookair_fn_keys; @@ -203,25 +202,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, trans = apple_find_translation (table, usage->code); if (trans) { - if (test_bit(usage->code, asc->pressed_fn)) - do_translate = 1; - else if (trans->flags & APPLE_FLAG_FKEY) - do_translate = (fnmode == 2 && asc->fn_on) || - (fnmode == 1 && !asc->fn_on); - else - do_translate = asc->fn_on; - - if (do_translate) { - if (value) - set_bit(usage->code, asc->pressed_fn); - else - clear_bit(usage->code, asc->pressed_fn); - - input_event(input, usage->type, trans->to, - value); - - return 1; + if (test_bit(trans->from, input->key)) + code = trans->from; + else if (test_bit(trans->to, input->key)) + code = trans->to; + + if (!code) { + if (trans->flags & APPLE_FLAG_FKEY) { + switch (fnmode) { + case 1: + do_translate = !asc->fn_on; + break; + case 2: + do_translate = asc->fn_on; + break; + default: + /* should never happen */ + do_translate = false; + } + } else { + do_translate = asc->fn_on; + } + + code = do_translate ? 
trans->to : trans->from; } + + input_event(input, usage->type, code, value); + return 1; } if (asc->quirks & APPLE_NUMLOCK_EMULATION && diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index a594e478a1e218abd629b69f8ca9b19b9335e753..843aed4dec80a8378f42f24d71f56ed0c604e0b5 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid) { struct axff_device *axff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int field_count = 0; int i, j; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 81efba580279b21787da045119b9251c9406e260..332bea8af79c40d8e6e60680c8d4894b31b0570d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -947,6 +947,7 @@ int hid_open_report(struct hid_device *device) __u8 *start; __u8 *buf; __u8 *end; + __u8 *next; int ret; static int (*dispatch_type[])(struct hid_parser *parser, struct hid_item *item) = { @@ -1000,7 +1001,8 @@ int hid_open_report(struct hid_device *device) device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; ret = -EINVAL; - while ((start = fetch_item(start, end, &item)) != NULL) { + while ((next = fetch_item(start, end, &item)) != NULL) { + start = next; if (item.format != HID_ITEM_FORMAT_SHORT) { hid_err(device, "unexpected long global item\n"); @@ -1029,7 +1031,8 @@ int hid_open_report(struct hid_device *device) } } - hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); + hid_err(device, "item fetching failed at offset %u/%u\n", + size - (unsigned int)(end - start), size); err: vfree(parser); hid_close_report(device); @@ -2103,6 +2106,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 818ea7d935333046adc5036d141ffa6cf6be5c40..309969b8dc2ecfb5353d5fbb5799c02ee087f0be 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid) { struct drff_device *drff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if 
(list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c index d82d75bb11f78b591483233bc294aaf3a5939a72..80f9a02dfa69b212b523a9e19a724a14c9edf3a2 100644 --- a/drivers/hid/hid-emsff.c +++ b/drivers/hid/hid-emsff.c @@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid) { struct emsff_device *emsff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c index 2d8cead3adcaadf68e30ea9a4f37c7fc21452d50..5a02c50443cb70057c6c6ae0a0c9fc10f6564781 100644 --- a/drivers/hid/hid-gaff.c +++ b/drivers/hid/hid-gaff.c @@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid) { struct gaff_device *gaff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c index 9325545fc3ae1cac4e3919c1120af6841590d4f4..3e84551cca9c5f1ab4991a5757b24e242a107411 100644 --- a/drivers/hid/hid-holtekff.c +++ b/drivers/hid/hid-holtekff.c @@ -140,13 +140,19 @@ static int holtekff_init(struct hid_device *hid) { struct holtekff_device *holtekff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 568a4c195bc99878629fd32b04ac41968ae30ce0..36d69ee04795f55a6131a2c834ff6b63452be910 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -954,6 +954,11 @@ #define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403 #define USB_DEVICE_ID_MTP_SITRONIX 0x5001 +#define USB_VENDOR_ID_VALVE 0x28de +#define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102 +#define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142 +#define USB_DEVICE_ID_STEAM_CONTROLLER_BT 0x1106 + #define 
USB_VENDOR_ID_STEELSERIES 0x1038 #define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410 diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 52026dc94d5c4b0306ce585be293cbe2cb1910d9..7e55d3f755dd569bd2c904d83e40710afc53f0d3 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -761,7 +761,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) if (!buf) { ret = -ENOMEM; - goto err_free; + goto err_stop; } ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), @@ -793,9 +793,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = lg4ff_init(hdev); if (ret) - goto err_free; + goto err_stop; return 0; + +err_stop: + hid_hw_stop(hdev); err_free: kfree(drv_data); return ret; @@ -806,8 +809,7 @@ static void lg_remove(struct hid_device *hdev) struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); - else - hid_hw_stop(hdev); + hid_hw_stop(hdev); kfree(drv_data); } diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c index 0e3fb1a7e42174dd1dfb953e7209fc14ab50ed7a..6909d9c2fc67a692a4b580b9749876658371c234 100644 --- a/drivers/hid/hid-lg2ff.c +++ b/drivers/hid/hid-lg2ff.c @@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid) { struct lg2ff_device *lg2ff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7); if (!report) diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c index 8c2da183d3bc71354d82b635b5234a70ca106850..acf739fc40603dcc9fdd4e44668231b760f931c5 100644 --- a/drivers/hid/hid-lg3ff.c +++ b/drivers/hid/hid-lg3ff.c @@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = { int lg3ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 1fc12e3570359248f2cd2847aad219ad4f134a81..1b109a5cf922ddd6ab9ff743cf29193bf5fb6e91 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -1261,8 +1261,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc int lg4ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); @@ -1274,6 +1274,13 @@ int lg4ff_init(struct hid_device 
*hid) int mmode_ret, mmode_idx = -1; u16 real_product_id; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -1; @@ -1485,7 +1492,6 @@ int lg4ff_deinit(struct hid_device *hid) } } #endif - hid_hw_stop(hid); drv_data->device_props = NULL; kfree(entry); diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index e1394af0ae7ba06701ad106896fb9391c417d6c0..1871cdcd1e0a8c5d93273b4dd58174efcaabdb14 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude) int lgff_init(struct hid_device* hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff_joystick; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -ENODEV; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 38d9deb03d16c3094cdfce936c6938ad705b7beb..434438334fb13815674cc781636be4cd0f1a4e72 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1238,8 +1238,8 @@ static void hidpp_ff_destroy(struct ff_device *ff) static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) { struct hid_device *hid = hidpp->hid_dev; - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice); struct ff_device *ff; @@ -1248,6 +1248,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) int error, j, num_slots; u8 version; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (!dev) { hid_err(hid, "Struct input_dev not set!\n"); return -EINVAL; diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index f095bf8a3aa94c0a1b6977d4ed41d763548b427e..762f33817dd0d4a4fe04bbfe6761ec2412d19e0c 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -556,10 +556,14 @@ static void pcmidi_setup_extra_keys( static int pcmidi_set_operational(struct pcmidi_snd *pm) { + int rc; + if (pm->ifnum != 1) return 0; /* only set up ONCE for interace 1 */ - pcmidi_get_output_report(pm); + rc = pcmidi_get_output_report(pm); + if (rc < 0) + return rc; pcmidi_submit_output_report(pm, 0xc1); return 0; } @@ -688,7 +692,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm) spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); - pcmidi_set_operational(pm); + err = pcmidi_set_operational(pm); + if (err < 0) { + pk_error("failed to find output report\n"); + goto fail_register; + } /* register it */ err = snd_card_register(card); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 
d03203a82e8f78962697791590ef45b35c003833..c8b07a182c0bfadb97d678deae6d6690465acd5f 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -578,10 +578,14 @@ static void sony_set_leds(struct sony_sc *sc); static inline void sony_schedule_work(struct sony_sc *sc, enum sony_worker which) { + unsigned long flags; + switch (which) { case SONY_WORKER_STATE: - if (!sc->defer_initialization) + spin_lock_irqsave(&sc->lock, flags); + if (!sc->defer_initialization && sc->state_worker_initialized) schedule_work(&sc->state_worker); + spin_unlock_irqrestore(&sc->lock, flags); break; case SONY_WORKER_HOTPLUG: if (sc->hotplug_worker_initialized) @@ -2159,9 +2163,15 @@ static int sony_play_effect(struct input_dev *dev, void *data, static int sony_init_ff(struct sony_sc *sc) { - struct hid_input *hidinput = list_entry(sc->hdev->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; + + if (list_empty(&sc->hdev->inputs)) { + hid_err(sc->hdev, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list); + input_dev = hidinput->input; input_set_capability(input_dev, EV_FF, FF_RUMBLE); return input_ff_create_memless(input_dev, NULL, sony_play_effect); @@ -2488,13 +2498,18 @@ static inline void sony_init_output_report(struct sony_sc *sc, static inline void sony_cancel_work_sync(struct sony_sc *sc) { + unsigned long flags; + if (sc->hotplug_worker_initialized) cancel_work_sync(&sc->hotplug_worker); - if (sc->state_worker_initialized) + if (sc->state_worker_initialized) { + spin_lock_irqsave(&sc->lock, flags); + sc->state_worker_initialized = 0; + spin_unlock_irqrestore(&sc->lock, flags); cancel_work_sync(&sc->state_worker); + } } - static int sony_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { @@ -2701,7 +2716,6 @@ static int sony_input_configured(struct hid_device *hdev, kfree(sc->output_report_dmabuf); sony_remove_dev_list(sc); sony_release_device_id(sc); - hid_hw_stop(hdev); return ret; } @@ -2763,6 +2777,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) */ if (!(hdev->claimed & HID_CLAIMED_INPUT)) { hid_err(hdev, "failed to claim input\n"); + hid_hw_stop(hdev); return -ENODEV; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c new file mode 100644 index 0000000000000000000000000000000000000000..44e1eefc5b24faa9a0f0773e10a39924b6024fbe --- /dev/null +++ b/drivers/hid/hid-steam.c @@ -0,0 +1,1141 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * HID driver for Valve Steam Controller + * + * Copyright (c) 2018 Rodrigo Rivas Costa + * + * Supports both the wired and wireless interfaces. + * + * This controller has a built-in emulation of mouse and keyboard: the right pad + * can be used as a mouse, the shoulder buttons are mouse buttons, A and B + * buttons are ENTER and ESCAPE, and so on. This is implemented as additional + * HID interfaces. + * + * This is known as the "lizard mode", because apparently lizards like to use + * the computer from the couch, without a proper mouse and keyboard. + * + * This driver will disable the lizard mode when the input device is opened + * and re-enable it when the input device is closed, so as not to break user + * mode behaviour. The lizard_mode parameter can be used to change that. + * + * There are a few user space applications (notably Steam Client) that use + * the hidraw interface directly to create input devices (XTest, uinput...).
+ * In order to avoid breaking them this driver creates a layered hidraw device, + * so it can detect when the client is running and then: + * - it will not send any command to the controller. + * - this input device will be removed, to avoid double input of the same + * user action. + * When the client is closed, this input device will be created again. + * + * For additional functions, such as changing the right-pad margin or switching + * the led, you can use the user-space tool at: + * + * https://github.com/rodrigorc/steamctrl + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hid-ids.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Rodrigo Rivas Costa "); + +static bool lizard_mode = true; + +static DEFINE_MUTEX(steam_devices_lock); +static LIST_HEAD(steam_devices); + +#define STEAM_QUIRK_WIRELESS BIT(0) + +/* Touch pads are 40 mm in diameter and 65535 units */ +#define STEAM_PAD_RESOLUTION 1638 +/* Trigger runs are about 5 mm and 256 units */ +#define STEAM_TRIGGER_RESOLUTION 51 +/* Joystick runs are about 5 mm and 256 units */ +#define STEAM_JOYSTICK_RESOLUTION 51 + +#define STEAM_PAD_FUZZ 256 + +/* + * Commands that can be sent in a feature report. + * Thanks to Valve for some valuable hints. + */ +#define STEAM_CMD_SET_MAPPINGS 0x80 +#define STEAM_CMD_CLEAR_MAPPINGS 0x81 +#define STEAM_CMD_GET_MAPPINGS 0x82 +#define STEAM_CMD_GET_ATTRIB 0x83 +#define STEAM_CMD_GET_ATTRIB_LABEL 0x84 +#define STEAM_CMD_DEFAULT_MAPPINGS 0x85 +#define STEAM_CMD_FACTORY_RESET 0x86 +#define STEAM_CMD_WRITE_REGISTER 0x87 +#define STEAM_CMD_CLEAR_REGISTER 0x88 +#define STEAM_CMD_READ_REGISTER 0x89 +#define STEAM_CMD_GET_REGISTER_LABEL 0x8a +#define STEAM_CMD_GET_REGISTER_MAX 0x8b +#define STEAM_CMD_GET_REGISTER_DEFAULT 0x8c +#define STEAM_CMD_SET_MODE 0x8d +#define STEAM_CMD_DEFAULT_MOUSE 0x8e +#define STEAM_CMD_FORCEFEEDBAK 0x8f +#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4 +#define STEAM_CMD_GET_SERIAL 0xae + +/* Some useful register ids */ +#define STEAM_REG_LPAD_MODE 0x07 +#define STEAM_REG_RPAD_MODE 0x08 +#define STEAM_REG_RPAD_MARGIN 0x18 +#define STEAM_REG_LED 0x2d +#define STEAM_REG_GYRO_MODE 0x30 + +/* Raw event identifiers */ +#define STEAM_EV_INPUT_DATA 0x01 +#define STEAM_EV_CONNECT 0x03 +#define STEAM_EV_BATTERY 0x04 + +/* Values for GYRO_MODE (bitmask) */ +#define STEAM_GYRO_MODE_OFF 0x0000 +#define STEAM_GYRO_MODE_STEERING 0x0001 +#define STEAM_GYRO_MODE_TILT 0x0002 +#define STEAM_GYRO_MODE_SEND_ORIENTATION 0x0004 +#define STEAM_GYRO_MODE_SEND_RAW_ACCEL 0x0008 +#define STEAM_GYRO_MODE_SEND_RAW_GYRO 0x0010 + +/* Other random constants */ +#define STEAM_SERIAL_LEN 10 + +struct steam_device { + struct list_head list; + spinlock_t lock; + struct hid_device *hdev, *client_hdev; + struct mutex mutex; + bool client_opened; + struct input_dev __rcu *input; + unsigned long quirks; + struct work_struct work_connect; + bool connected; + char serial_no[STEAM_SERIAL_LEN + 1]; + struct power_supply_desc battery_desc; + struct power_supply __rcu *battery; + u8 battery_charge; + u16 voltage; +}; + +static int steam_recv_report(struct steam_device *steam, + u8 *data, int size) +{ + struct hid_report *r; + u8 *buf; + int ret; + + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; + if (hid_report_len(r) < 64) + return -EINVAL; + + buf = hid_alloc_report_buf(r, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * The report ID is always 0, so strip the first byte from the output. 
+ * hid_report_len() is not counting the report ID, so +1 to the length + * or else we get a EOVERFLOW. We are safe from a buffer overflow + * because hid_alloc_report_buf() allocates +7 bytes. + */ + ret = hid_hw_raw_request(steam->hdev, 0x00, + buf, hid_report_len(r) + 1, + HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + if (ret > 0) + memcpy(data, buf + 1, min(size, ret - 1)); + kfree(buf); + return ret; +} + +static int steam_send_report(struct steam_device *steam, + u8 *cmd, int size) +{ + struct hid_report *r; + u8 *buf; + unsigned int retries = 50; + int ret; + + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; + if (hid_report_len(r) < 64) + return -EINVAL; + + buf = hid_alloc_report_buf(r, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* The report ID is always 0 */ + memcpy(buf + 1, cmd, size); + + /* + * Sometimes the wireless controller fails with EPIPE + * when sending a feature report. + * Doing a HID_REQ_GET_REPORT and waiting for a while + * seems to fix that. + */ + do { + ret = hid_hw_raw_request(steam->hdev, 0, + buf, size + 1, + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + if (ret != -EPIPE) + break; + msleep(20); + } while (--retries); + + kfree(buf); + if (ret < 0) + hid_err(steam->hdev, "%s: error %d (%*ph)\n", __func__, + ret, size, cmd); + return ret; +} + +static inline int steam_send_report_byte(struct steam_device *steam, u8 cmd) +{ + return steam_send_report(steam, &cmd, 1); +} + +static int steam_write_registers(struct steam_device *steam, + /* u8 reg, u16 val */...) +{ + /* Send: 0x87 len (reg valLo valHi)* */ + u8 reg; + u16 val; + u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00}; + va_list args; + + va_start(args, steam); + for (;;) { + reg = va_arg(args, int); + if (reg == 0) + break; + val = va_arg(args, int); + cmd[cmd[1] + 2] = reg; + cmd[cmd[1] + 3] = val & 0xff; + cmd[cmd[1] + 4] = val >> 8; + cmd[1] += 3; + } + va_end(args); + + return steam_send_report(steam, cmd, 2 + cmd[1]); +} + +static int steam_get_serial(struct steam_device *steam) +{ + /* + * Send: 0xae 0x15 0x01 + * Recv: 0xae 0x15 0x01 serialnumber (10 chars) + */ + int ret; + u8 cmd[] = {STEAM_CMD_GET_SERIAL, 0x15, 0x01}; + u8 reply[3 + STEAM_SERIAL_LEN + 1]; + + ret = steam_send_report(steam, cmd, sizeof(cmd)); + if (ret < 0) + return ret; + ret = steam_recv_report(steam, reply, sizeof(reply)); + if (ret < 0) + return ret; + if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01) + return -EIO; + reply[3 + STEAM_SERIAL_LEN] = 0; + strlcpy(steam->serial_no, reply + 3, sizeof(steam->serial_no)); + return 0; +} + +/* + * This command requests the wireless adaptor to post an event + * with the connection status. Useful if this driver is loaded when + * the controller is already connected. 
+ */ +static inline int steam_request_conn_status(struct steam_device *steam) +{ + return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS); +} + +static void steam_set_lizard_mode(struct steam_device *steam, bool enable) +{ + if (enable) { + /* enable esc, enter, cursors */ + steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS); + /* enable mouse */ + steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MOUSE); + steam_write_registers(steam, + STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */ + 0); + } else { + /* disable esc, enter, cursor */ + steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS); + steam_write_registers(steam, + STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */ + STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */ + 0); + } +} + +static int steam_input_open(struct input_dev *dev) +{ + struct steam_device *steam = input_get_drvdata(dev); + + mutex_lock(&steam->mutex); + if (!steam->client_opened && lizard_mode) + steam_set_lizard_mode(steam, false); + mutex_unlock(&steam->mutex); + return 0; +} + +static void steam_input_close(struct input_dev *dev) +{ + struct steam_device *steam = input_get_drvdata(dev); + + mutex_lock(&steam->mutex); + if (!steam->client_opened && lizard_mode) + steam_set_lizard_mode(steam, true); + mutex_unlock(&steam->mutex); +} + +static enum power_supply_property steam_battery_props[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_SCOPE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CAPACITY, +}; + +static int steam_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct steam_device *steam = power_supply_get_drvdata(psy); + unsigned long flags; + s16 volts; + u8 batt; + int ret = 0; + + spin_lock_irqsave(&steam->lock, flags); + volts = steam->voltage; + batt = steam->battery_charge; + spin_unlock_irqrestore(&steam->lock, flags); + + switch (psp) { + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_DEVICE; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = volts * 1000; /* mV -> uV */ + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = batt; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int steam_battery_register(struct steam_device *steam) +{ + struct power_supply *battery; + struct power_supply_config battery_cfg = { .drv_data = steam, }; + unsigned long flags; + int ret; + + steam->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; + steam->battery_desc.properties = steam_battery_props; + steam->battery_desc.num_properties = ARRAY_SIZE(steam_battery_props); + steam->battery_desc.get_property = steam_battery_get_property; + steam->battery_desc.name = devm_kasprintf(&steam->hdev->dev, + GFP_KERNEL, "steam-controller-%s-battery", + steam->serial_no); + if (!steam->battery_desc.name) + return -ENOMEM; + + /* avoid the warning of 0% battery while waiting for the first info */ + spin_lock_irqsave(&steam->lock, flags); + steam->voltage = 3000; + steam->battery_charge = 100; + spin_unlock_irqrestore(&steam->lock, flags); + + battery = power_supply_register(&steam->hdev->dev, + &steam->battery_desc, &battery_cfg); + if (IS_ERR(battery)) { + ret = PTR_ERR(battery); + hid_err(steam->hdev, + "%s:power_supply_register failed with error %d\n", + __func__, ret); + return ret; + } + rcu_assign_pointer(steam->battery, battery); + power_supply_powers(battery, &steam->hdev->dev); + return 0; +} + +static int steam_input_register(struct steam_device 
*steam) +{ + struct hid_device *hdev = steam->hdev; + struct input_dev *input; + int ret; + + rcu_read_lock(); + input = rcu_dereference(steam->input); + rcu_read_unlock(); + if (input) { + dbg_hid("%s: already connected\n", __func__); + return 0; + } + + input = input_allocate_device(); + if (!input) + return -ENOMEM; + + input_set_drvdata(input, steam); + input->dev.parent = &hdev->dev; + input->open = steam_input_open; + input->close = steam_input_close; + + input->name = (steam->quirks & STEAM_QUIRK_WIRELESS) ? + "Wireless Steam Controller" : + "Steam Controller"; + input->phys = hdev->phys; + input->uniq = steam->serial_no; + input->id.bustype = hdev->bus; + input->id.vendor = hdev->vendor; + input->id.product = hdev->product; + input->id.version = hdev->version; + + input_set_capability(input, EV_KEY, BTN_TR2); + input_set_capability(input, EV_KEY, BTN_TL2); + input_set_capability(input, EV_KEY, BTN_TR); + input_set_capability(input, EV_KEY, BTN_TL); + input_set_capability(input, EV_KEY, BTN_Y); + input_set_capability(input, EV_KEY, BTN_B); + input_set_capability(input, EV_KEY, BTN_X); + input_set_capability(input, EV_KEY, BTN_A); + input_set_capability(input, EV_KEY, BTN_DPAD_UP); + input_set_capability(input, EV_KEY, BTN_DPAD_RIGHT); + input_set_capability(input, EV_KEY, BTN_DPAD_LEFT); + input_set_capability(input, EV_KEY, BTN_DPAD_DOWN); + input_set_capability(input, EV_KEY, BTN_SELECT); + input_set_capability(input, EV_KEY, BTN_MODE); + input_set_capability(input, EV_KEY, BTN_START); + input_set_capability(input, EV_KEY, BTN_GEAR_DOWN); + input_set_capability(input, EV_KEY, BTN_GEAR_UP); + input_set_capability(input, EV_KEY, BTN_THUMBR); + input_set_capability(input, EV_KEY, BTN_THUMBL); + input_set_capability(input, EV_KEY, BTN_THUMB); + input_set_capability(input, EV_KEY, BTN_THUMB2); + + input_set_abs_params(input, ABS_HAT2Y, 0, 255, 0, 0); + input_set_abs_params(input, ABS_HAT2X, 0, 255, 0, 0); + input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0); + input_set_abs_params(input, ABS_Y, -32767, 32767, 0, 0); + input_set_abs_params(input, ABS_RX, -32767, 32767, + STEAM_PAD_FUZZ, 0); + input_set_abs_params(input, ABS_RY, -32767, 32767, + STEAM_PAD_FUZZ, 0); + input_set_abs_params(input, ABS_HAT0X, -32767, 32767, + STEAM_PAD_FUZZ, 0); + input_set_abs_params(input, ABS_HAT0Y, -32767, 32767, + STEAM_PAD_FUZZ, 0); + input_abs_set_res(input, ABS_X, STEAM_JOYSTICK_RESOLUTION); + input_abs_set_res(input, ABS_Y, STEAM_JOYSTICK_RESOLUTION); + input_abs_set_res(input, ABS_RX, STEAM_PAD_RESOLUTION); + input_abs_set_res(input, ABS_RY, STEAM_PAD_RESOLUTION); + input_abs_set_res(input, ABS_HAT0X, STEAM_PAD_RESOLUTION); + input_abs_set_res(input, ABS_HAT0Y, STEAM_PAD_RESOLUTION); + input_abs_set_res(input, ABS_HAT2Y, STEAM_TRIGGER_RESOLUTION); + input_abs_set_res(input, ABS_HAT2X, STEAM_TRIGGER_RESOLUTION); + + ret = input_register_device(input); + if (ret) + goto input_register_fail; + + rcu_assign_pointer(steam->input, input); + return 0; + +input_register_fail: + input_free_device(input); + return ret; +} + +static void steam_input_unregister(struct steam_device *steam) +{ + struct input_dev *input; + rcu_read_lock(); + input = rcu_dereference(steam->input); + rcu_read_unlock(); + if (!input) + return; + RCU_INIT_POINTER(steam->input, NULL); + synchronize_rcu(); + input_unregister_device(input); +} + +static void steam_battery_unregister(struct steam_device *steam) +{ + struct power_supply *battery; + + rcu_read_lock(); + battery = rcu_dereference(steam->battery); + rcu_read_unlock(); + 
+ if (!battery) + return; + RCU_INIT_POINTER(steam->battery, NULL); + synchronize_rcu(); + power_supply_unregister(battery); +} + +static int steam_register(struct steam_device *steam) +{ + int ret; + bool client_opened; + + /* + * This function can be called several times in a row with the + * wireless adaptor, without steam_unregister() between them, because + * another client send a get_connection_status command, for example. + * The battery and serial number are set just once per device. + */ + if (!steam->serial_no[0]) { + /* + * Unlikely, but getting the serial could fail, and it is not so + * important, so make up a serial number and go on. + */ + mutex_lock(&steam->mutex); + if (steam_get_serial(steam) < 0) + strlcpy(steam->serial_no, "XXXXXXXXXX", + sizeof(steam->serial_no)); + mutex_unlock(&steam->mutex); + + hid_info(steam->hdev, "Steam Controller '%s' connected", + steam->serial_no); + + /* ignore battery errors, we can live without it */ + if (steam->quirks & STEAM_QUIRK_WIRELESS) + steam_battery_register(steam); + + mutex_lock(&steam_devices_lock); + list_add(&steam->list, &steam_devices); + mutex_unlock(&steam_devices_lock); + } + + mutex_lock(&steam->mutex); + client_opened = steam->client_opened; + if (!client_opened) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + if (!client_opened) + ret = steam_input_register(steam); + else + ret = 0; + + return ret; +} + +static void steam_unregister(struct steam_device *steam) +{ + steam_battery_unregister(steam); + steam_input_unregister(steam); + if (steam->serial_no[0]) { + hid_info(steam->hdev, "Steam Controller '%s' disconnected", + steam->serial_no); + mutex_lock(&steam_devices_lock); + list_del(&steam->list); + mutex_unlock(&steam_devices_lock); + steam->serial_no[0] = 0; + } +} + +static void steam_work_connect_cb(struct work_struct *work) +{ + struct steam_device *steam = container_of(work, struct steam_device, + work_connect); + unsigned long flags; + bool connected; + int ret; + + spin_lock_irqsave(&steam->lock, flags); + connected = steam->connected; + spin_unlock_irqrestore(&steam->lock, flags); + + if (connected) { + ret = steam_register(steam); + if (ret) { + hid_err(steam->hdev, + "%s:steam_register failed with error %d\n", + __func__, ret); + } + } else { + steam_unregister(steam); + } +} + +static bool steam_is_valve_interface(struct hid_device *hdev) +{ + struct hid_report_enum *rep_enum; + + /* + * The wired device creates 3 interfaces: + * 0: emulated mouse. + * 1: emulated keyboard. + * 2: the real game pad. + * The wireless device creates 5 interfaces: + * 0: emulated keyboard. + * 1-4: slots where up to 4 real game pads will be connected to. + * We know which one is the real gamepad interface because they are the + * only ones with a feature report. 
+ */ + rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; + return !list_empty(&rep_enum->report_list); +} + +static int steam_client_ll_parse(struct hid_device *hdev) +{ + struct steam_device *steam = hdev->driver_data; + + return hid_parse_report(hdev, steam->hdev->dev_rdesc, + steam->hdev->dev_rsize); +} + +static int steam_client_ll_start(struct hid_device *hdev) +{ + return 0; +} + +static void steam_client_ll_stop(struct hid_device *hdev) +{ +} + +static int steam_client_ll_open(struct hid_device *hdev) +{ + struct steam_device *steam = hdev->driver_data; + + mutex_lock(&steam->mutex); + steam->client_opened = true; + mutex_unlock(&steam->mutex); + + steam_input_unregister(steam); + + return 0; +} + +static void steam_client_ll_close(struct hid_device *hdev) +{ + struct steam_device *steam = hdev->driver_data; + + unsigned long flags; + bool connected; + + spin_lock_irqsave(&steam->lock, flags); + connected = steam->connected; + spin_unlock_irqrestore(&steam->lock, flags); + + mutex_lock(&steam->mutex); + steam->client_opened = false; + if (connected) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + if (connected) + steam_input_register(steam); +} + +static int steam_client_ll_raw_request(struct hid_device *hdev, + unsigned char reportnum, u8 *buf, + size_t count, unsigned char report_type, + int reqtype) +{ + struct steam_device *steam = hdev->driver_data; + + return hid_hw_raw_request(steam->hdev, reportnum, buf, count, + report_type, reqtype); +} + +static struct hid_ll_driver steam_client_ll_driver = { + .parse = steam_client_ll_parse, + .start = steam_client_ll_start, + .stop = steam_client_ll_stop, + .open = steam_client_ll_open, + .close = steam_client_ll_close, + .raw_request = steam_client_ll_raw_request, +}; + +static struct hid_device *steam_create_client_hid(struct hid_device *hdev) +{ + struct hid_device *client_hdev; + + client_hdev = hid_allocate_device(); + if (IS_ERR(client_hdev)) + return client_hdev; + + client_hdev->ll_driver = &steam_client_ll_driver; + client_hdev->dev.parent = hdev->dev.parent; + client_hdev->bus = hdev->bus; + client_hdev->vendor = hdev->vendor; + client_hdev->product = hdev->product; + client_hdev->version = hdev->version; + client_hdev->type = hdev->type; + client_hdev->country = hdev->country; + strlcpy(client_hdev->name, hdev->name, + sizeof(client_hdev->name)); + strlcpy(client_hdev->phys, hdev->phys, + sizeof(client_hdev->phys)); + /* + * Since we use the same device info than the real interface to + * trick userspace, we will be calling steam_probe recursively. + * We need to recognize the client interface somehow. + */ + client_hdev->group = HID_GROUP_STEAM; + return client_hdev; +} + +static int steam_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + struct steam_device *steam; + int ret; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, + "%s:parse of hid interface failed\n", __func__); + return ret; + } + + /* + * The virtual client_dev is only used for hidraw. + * Also avoid the recursive probe. + */ + if (hdev->group == HID_GROUP_STEAM) + return hid_hw_start(hdev, HID_CONNECT_HIDRAW); + /* + * The non-valve interfaces (mouse and keyboard emulation) are + * connected without changes. 
+ */ + if (!steam_is_valve_interface(hdev)) + return hid_hw_start(hdev, HID_CONNECT_DEFAULT); + + steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL); + if (!steam) { + ret = -ENOMEM; + goto steam_alloc_fail; + } + steam->hdev = hdev; + hid_set_drvdata(hdev, steam); + spin_lock_init(&steam->lock); + mutex_init(&steam->mutex); + steam->quirks = id->driver_data; + INIT_WORK(&steam->work_connect, steam_work_connect_cb); + + steam->client_hdev = steam_create_client_hid(hdev); + if (IS_ERR(steam->client_hdev)) { + ret = PTR_ERR(steam->client_hdev); + goto client_hdev_fail; + } + steam->client_hdev->driver_data = steam; + + /* + * With the real steam controller interface, do not connect hidraw. + * Instead, create the client_hid and connect that. + */ + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW); + if (ret) + goto hid_hw_start_fail; + + ret = hid_add_device(steam->client_hdev); + if (ret) + goto client_hdev_add_fail; + + ret = hid_hw_open(hdev); + if (ret) { + hid_err(hdev, + "%s:hid_hw_open\n", + __func__); + goto hid_hw_open_fail; + } + + if (steam->quirks & STEAM_QUIRK_WIRELESS) { + hid_info(hdev, "Steam wireless receiver connected"); + steam_request_conn_status(steam); + } else { + ret = steam_register(steam); + if (ret) { + hid_err(hdev, + "%s:steam_register failed with error %d\n", + __func__, ret); + goto input_register_fail; + } + } + + return 0; + +input_register_fail: +hid_hw_open_fail: +client_hdev_add_fail: + hid_hw_stop(hdev); +hid_hw_start_fail: + hid_destroy_device(steam->client_hdev); +client_hdev_fail: + cancel_work_sync(&steam->work_connect); +steam_alloc_fail: + hid_err(hdev, "%s: failed with error %d\n", + __func__, ret); + return ret; +} + +static void steam_remove(struct hid_device *hdev) +{ + struct steam_device *steam = hid_get_drvdata(hdev); + + if (!steam || hdev->group == HID_GROUP_STEAM) { + hid_hw_stop(hdev); + return; + } + + hid_destroy_device(steam->client_hdev); + steam->client_opened = false; + cancel_work_sync(&steam->work_connect); + if (steam->quirks & STEAM_QUIRK_WIRELESS) { + hid_info(hdev, "Steam wireless receiver disconnected"); + } + hid_hw_close(hdev); + hid_hw_stop(hdev); + steam_unregister(steam); +} + +static void steam_do_connect_event(struct steam_device *steam, bool connected) +{ + unsigned long flags; + bool changed; + + spin_lock_irqsave(&steam->lock, flags); + changed = steam->connected != connected; + steam->connected = connected; + spin_unlock_irqrestore(&steam->lock, flags); + + if (changed && schedule_work(&steam->work_connect) == 0) + dbg_hid("%s: connected=%d event already queued\n", + __func__, connected); +} + +/* + * Some input data in the protocol has the opposite sign. + * Clamp the values to 32767..-32767 so that the range is + * symmetrical and can be negated safely. + */ +static inline s16 steam_le16(u8 *data) +{ + s16 x = (s16) le16_to_cpup((__le16 *)data); + + return x == -32768 ? -32767 : x; +} + +/* + * The size for this message payload is 60. 
+ * The known values are: + * (* values are not sent through wireless) + * (* accelerator/gyro is disabled by default) + * Offset| Type | Mapped to |Meaning + * -------+-------+-----------+-------------------------- + * 4-7 | u32 | -- | sequence number + * 8-10 | 24bit | see below | buttons + * 11 | u8 | ABS_HAT2Y | left trigger + * 12 | u8 | ABS_HAT2X | right trigger + * 13-15 | -- | -- | always 0 + * 16-17 | s16 | ABS_X/ABS_HAT0X | X value + * 18-19 | s16 | ABS_Y/ABS_HAT0Y | Y value + * 20-21 | s16 | ABS_RX | right-pad X value + * 22-23 | s16 | ABS_RY | right-pad Y value + * 24-25 | s16 | -- | * left trigger + * 26-27 | s16 | -- | * right trigger + * 28-29 | s16 | -- | * accelerometer X value + * 30-31 | s16 | -- | * accelerometer Y value + * 32-33 | s16 | -- | * accelerometer Z value + * 34-35 | s16 | -- | gyro X value + * 36-37 | s16 | -- | gyro Y value + * 38-39 | s16 | -- | gyro Z value + * 40-41 | s16 | -- | quaternion W value + * 42-43 | s16 | -- | quaternion X value + * 44-45 | s16 | -- | quaternion Y value + * 46-47 | s16 | -- | quaternion Z value + * 48-49 | -- | -- | always 0 + * 50-51 | s16 | -- | * left trigger (uncalibrated) + * 52-53 | s16 | -- | * right trigger (uncalibrated) + * 54-55 | s16 | -- | * joystick X value (uncalibrated) + * 56-57 | s16 | -- | * joystick Y value (uncalibrated) + * 58-59 | s16 | -- | * left-pad X value + * 60-61 | s16 | -- | * left-pad Y value + * 62-63 | u16 | -- | * battery voltage + * + * The buttons are: + * Bit | Mapped to | Description + * ------+------------+-------------------------------- + * 8.0 | BTN_TR2 | right trigger fully pressed + * 8.1 | BTN_TL2 | left trigger fully pressed + * 8.2 | BTN_TR | right shoulder + * 8.3 | BTN_TL | left shoulder + * 8.4 | BTN_Y | button Y + * 8.5 | BTN_B | button B + * 8.6 | BTN_X | button X + * 8.7 | BTN_A | button A + * 9.0 | BTN_DPAD_UP | left-pad up + * 9.1 | BTN_DPAD_RIGHT | left-pad right + * 9.2 | BTN_DPAD_LEFT | left-pad left + * 9.3 | BTN_DPAD_DOWN | left-pad down + * 9.4 | BTN_SELECT | menu left + * 9.5 | BTN_MODE | steam logo + * 9.6 | BTN_START | menu right + * 9.7 | BTN_GEAR_DOWN | left back lever + * 10.0 | BTN_GEAR_UP | right back lever + * 10.1 | -- | left-pad clicked + * 10.2 | BTN_THUMBR | right-pad clicked + * 10.3 | BTN_THUMB | left-pad touched (but see explanation below) + * 10.4 | BTN_THUMB2 | right-pad touched + * 10.5 | -- | unknown + * 10.6 | BTN_THUMBL | joystick clicked + * 10.7 | -- | lpad_and_joy + */ + +static void steam_do_input_event(struct steam_device *steam, + struct input_dev *input, u8 *data) +{ + /* 24 bits of buttons */ + u8 b8, b9, b10; + s16 x, y; + bool lpad_touched, lpad_and_joy; + + b8 = data[8]; + b9 = data[9]; + b10 = data[10]; + + input_report_abs(input, ABS_HAT2Y, data[11]); + input_report_abs(input, ABS_HAT2X, data[12]); + + /* + * These two bits tell how to interpret the values X and Y. + * lpad_and_joy tells that the joystick and the lpad are used at the + * same time. + * lpad_touched tells whether X/Y are to be read as lpad coord or + * joystick values. + * (lpad_touched || lpad_and_joy) tells if the lpad is really touched. + */ + lpad_touched = b10 & BIT(3); + lpad_and_joy = b10 & BIT(7); + x = steam_le16(data + 16); + y = -steam_le16(data + 18); + + input_report_abs(input, lpad_touched ? ABS_HAT0X : ABS_X, x); + input_report_abs(input, lpad_touched ?
ABS_HAT0Y : ABS_Y, y); + /* Check if joystick is centered */ + if (lpad_touched && !lpad_and_joy) { + input_report_abs(input, ABS_X, 0); + input_report_abs(input, ABS_Y, 0); + } + /* Check if lpad is untouched */ + if (!(lpad_touched || lpad_and_joy)) { + input_report_abs(input, ABS_HAT0X, 0); + input_report_abs(input, ABS_HAT0Y, 0); + } + + input_report_abs(input, ABS_RX, steam_le16(data + 20)); + input_report_abs(input, ABS_RY, -steam_le16(data + 22)); + + input_event(input, EV_KEY, BTN_TR2, !!(b8 & BIT(0))); + input_event(input, EV_KEY, BTN_TL2, !!(b8 & BIT(1))); + input_event(input, EV_KEY, BTN_TR, !!(b8 & BIT(2))); + input_event(input, EV_KEY, BTN_TL, !!(b8 & BIT(3))); + input_event(input, EV_KEY, BTN_Y, !!(b8 & BIT(4))); + input_event(input, EV_KEY, BTN_B, !!(b8 & BIT(5))); + input_event(input, EV_KEY, BTN_X, !!(b8 & BIT(6))); + input_event(input, EV_KEY, BTN_A, !!(b8 & BIT(7))); + input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4))); + input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5))); + input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6))); + input_event(input, EV_KEY, BTN_GEAR_DOWN, !!(b9 & BIT(7))); + input_event(input, EV_KEY, BTN_GEAR_UP, !!(b10 & BIT(0))); + input_event(input, EV_KEY, BTN_THUMBR, !!(b10 & BIT(2))); + input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6))); + input_event(input, EV_KEY, BTN_THUMB, lpad_touched || lpad_and_joy); + input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(4))); + input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0))); + input_event(input, EV_KEY, BTN_DPAD_RIGHT, !!(b9 & BIT(1))); + input_event(input, EV_KEY, BTN_DPAD_LEFT, !!(b9 & BIT(2))); + input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3))); + + input_sync(input); +} + +/* + * The size for this message payload is 11. + * The known values are: + * Offset| Type | Meaning + * -------+-------+--------------------------- + * 4-7 | u32 | sequence number + * 8-11 | -- | always 0 + * 12-13 | u16 | voltage (mV) + * 14 | u8 | battery percent + */ +static void steam_do_battery_event(struct steam_device *steam, + struct power_supply *battery, u8 *data) +{ + unsigned long flags; + + s16 volts = steam_le16(data + 12); + u8 batt = data[14]; + + /* Creating the battery may have failed */ + rcu_read_lock(); + battery = rcu_dereference(steam->battery); + if (likely(battery)) { + spin_lock_irqsave(&steam->lock, flags); + steam->voltage = volts; + steam->battery_charge = batt; + spin_unlock_irqrestore(&steam->lock, flags); + power_supply_changed(battery); + } + rcu_read_unlock(); +} + +static int steam_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, + int size) +{ + struct steam_device *steam = hid_get_drvdata(hdev); + struct input_dev *input; + struct power_supply *battery; + + if (!steam) + return 0; + + if (steam->client_opened) + hid_input_report(steam->client_hdev, HID_FEATURE_REPORT, + data, size, 0); + /* + * All messages are size=64, all values little-endian. + * The format is: + * Offset| Meaning + * -------+-------------------------------------------- + * 0-1 | always 0x01, 0x00, maybe protocol version? 
+ * 2 | type of message + * 3 | length of the real payload (not checked) + * 4-n | payload data, depends on the type + * + * There are these known types of message: + * 0x01: input data (60 bytes) + * 0x03: wireless connect/disconnect (1 byte) + * 0x04: battery status (11 bytes) + */ + + if (size != 64 || data[0] != 1 || data[1] != 0) + return 0; + + switch (data[2]) { + case STEAM_EV_INPUT_DATA: + if (steam->client_opened) + return 0; + rcu_read_lock(); + input = rcu_dereference(steam->input); + if (likely(input)) + steam_do_input_event(steam, input, data); + rcu_read_unlock(); + break; + case STEAM_EV_CONNECT: + /* + * The payload of this event is a single byte: + * 0x01: disconnected. + * 0x02: connected. + */ + switch (data[4]) { + case 0x01: + steam_do_connect_event(steam, false); + break; + case 0x02: + steam_do_connect_event(steam, true); + break; + } + break; + case STEAM_EV_BATTERY: + if (steam->quirks & STEAM_QUIRK_WIRELESS) { + rcu_read_lock(); + battery = rcu_dereference(steam->battery); + if (likely(battery)) { + steam_do_battery_event(steam, battery, data); + } else { + dbg_hid( + "%s: battery data without connect event\n", + __func__); + steam_do_connect_event(steam, true); + } + rcu_read_unlock(); + } + break; + } + return 0; +} + +static int steam_param_set_lizard_mode(const char *val, + const struct kernel_param *kp) +{ + struct steam_device *steam; + int ret; + + ret = param_set_bool(val, kp); + if (ret) + return ret; + + mutex_lock(&steam_devices_lock); + list_for_each_entry(steam, &steam_devices, list) { + mutex_lock(&steam->mutex); + if (!steam->client_opened) + steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + } + mutex_unlock(&steam_devices_lock); + return 0; +} + +static const struct kernel_param_ops steam_lizard_mode_ops = { + .set = steam_param_set_lizard_mode, + .get = param_get_bool, +}; + +module_param_cb(lizard_mode, &steam_lizard_mode_ops, &lizard_mode, 0644); +MODULE_PARM_DESC(lizard_mode, + "Enable mouse and keyboard emulation (lizard mode) when the gamepad is not in use"); + +static const struct hid_device_id steam_controllers[] = { + { /* Wired Steam Controller */ + HID_USB_DEVICE(USB_VENDOR_ID_VALVE, + USB_DEVICE_ID_STEAM_CONTROLLER) + }, + { /* Wireless Steam Controller */ + HID_USB_DEVICE(USB_VENDOR_ID_VALVE, + USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS), + .driver_data = STEAM_QUIRK_WIRELESS + }, + {} +}; + +MODULE_DEVICE_TABLE(hid, steam_controllers); + +static struct hid_driver steam_controller_driver = { + .name = "hid-steam", + .id_table = steam_controllers, + .probe = steam_probe, + .remove = steam_remove, + .raw_event = steam_raw_event, +}; + +module_hid_driver(steam_controller_driver); \ No newline at end of file diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index cfa0cb22c9b3ca7e4068101f13ba611b71a18a72..d98e471a5f7ba53c61505b8efdb4a0ea4fa23555 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -136,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits) struct tmff_device *tmff; struct hid_report *report; struct list_head *report_list; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + input_dev = hidinput->input; + tmff = 
kzalloc(sizeof(struct tmff_device), GFP_KERNEL); if (!tmff) return -ENOMEM; diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c index a29756c6ca02d064faee371143e750b19094f26c..4e7e01be99b13fcd28edc814f9d0a7425a74c1c0 100644 --- a/drivers/hid/hid-zpff.c +++ b/drivers/hid/hid-zpff.c @@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid) { struct zpff_device *zpff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; int i, error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + for (i = 0; i < 4; i++) { report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); if (!report) diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 216f0338a1f7746bbc1fcc2e0abd67fe94e59bcf..750c168971302863e532cd2eca5f8e3d06c71e7d 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -378,7 +378,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, mutex_lock(&minors_lock); dev = hidraw_table[minor]; - if (!dev) { + if (!dev || !dev->exist) { ret = -ENODEV; goto out; } diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index cac262a912c1248747d2814fa9e3b3d3512f8c76..10af8585c820d253b71301f23efae5db3b40fc14 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + /* + * There are at least 2 Primebook C11B versions, the older + * version has a product-name of "Primebook C11B", and a + * bios version / release / firmware revision of: + * V2.1.2 / 05/03/2018 / 18.2 + * The new version has "PRIMEBOOK C11B" as product-name and a + * bios version / release / firmware revision of: + * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2 + * Only the older version needs this quirk, note the newer + * version will not match as it has a different product-name. 
+ */ + .ident = "Trekstor Primebook C11B", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Direkt-Tek DTLAPY116-2", .matches = { @@ -330,6 +349,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Direkt-Tek DTLAPY133-1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Mediacom Flexbook Edge 11", .matches = { @@ -338,6 +365,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Odys Winbook 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"), + }, + .driver_data = (void *)&sipodev_desc + }, { } /* Terminate list */ }; diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c index b9b917d2d50db3fedaa17ce8f6fcb3a05e2f38c3..c41dbb167c91ba23f3159ba1a629b118e26a0abd 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c +++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c @@ -90,7 +90,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl) return 0; out: dev_err(&cl->device->dev, "error in allocating Tx pool\n"); - ishtp_cl_free_rx_ring(cl); + ishtp_cl_free_tx_ring(cl); return -ENOMEM; } diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 579bdf93be433b54a23fa8827badd7ace8dbb762..e27f7e12c05bbb066a5ddb7e0f0aa433acfe6e13 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -693,8 +693,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource) if (resource->caps.flags & POWER_METER_CAN_CAP) { if (!can_cap_in_hardware()) { - dev_err(&resource->acpi_dev->dev, - "Ignoring unsafe software power cap!\n"); + dev_warn(&resource->acpi_dev->dev, + "Ignoring unsafe software power cap!\n"); goto skip_unsafe_cap; } diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index e6b49500c52aedc8751afd7114ed2283e505ea56..8c9555313fc3db87be75921a24280bdf1f95e26d 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -38,9 +38,9 @@ #define INA3221_WARN3 0x0c #define INA3221_MASK_ENABLE 0x0f -#define INA3221_CONFIG_MODE_SHUNT BIT(1) -#define INA3221_CONFIG_MODE_BUS BIT(2) -#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3) +#define INA3221_CONFIG_MODE_SHUNT BIT(0) +#define INA3221_CONFIG_MODE_BUS BIT(1) +#define INA3221_CONFIG_MODE_CONTINUOUS BIT(2) #define INA3221_RSHUNT_DEFAULT 10000 diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index fb03449de2e075bc9b3a816815e999e630a356dd..aa6333620c37dbde8f2e85f4dfa2bba44aa12202 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -231,8 +231,12 @@ static int pwm_fan_probe(struct platform_device *pdev) ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL); if (IS_ERR(ctx->pwm)) { - dev_err(&pdev->dev, "Could not get PWM\n"); - return PTR_ERR(ctx->pwm); + ret = PTR_ERR(ctx->pwm); + + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get PWM: %d\n", ret); + + return ret; } platform_set_drvdata(pdev, ctx); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 
95d13a9d398994e7da2b42b5ef573754c6d89771..2c3845b0d8537c2a96c214548e6a34614504f603 100755 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "coresight-etm4x.h" #include "coresight-etm-perf.h" @@ -184,6 +185,12 @@ static void etm4_enable_hw(void *info) if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) dev_err(drvdata->dev, "timeout while waiting for Idle Trace Status\n"); + /* + * As recommended by section 4.3.7 ("Synchronization when using the + * memory-mapped interface") of ARM IHI 0064D + */ + dsb(sy); + isb(); CS_LOCK(drvdata->base); @@ -328,8 +335,12 @@ static void etm4_disable_hw(void *info) /* EN, bit[0] Trace unit enable bit */ control &= ~0x1; - /* make sure everything completes before disabling */ - mb(); + /* + * Make sure everything completes before disabling, as recommended + * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, + * SSTATUS") of ARM IHI 0064D + */ + dsb(sy); isb(); writel_relaxed(control, drvdata->base + TRCPRGCTLR); @@ -615,7 +626,7 @@ static void etm4_set_default_config(struct etmv4_config *config) config->vinst_ctrl |= BIT(0); } -static u64 etm4_get_access_type(struct etmv4_config *config) +static u64 etm4_get_ns_access_type(struct etmv4_config *config) { u64 access_type = 0; @@ -626,17 +637,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config) * Bit[13] Exception level 1 - OS * Bit[14] Exception level 2 - Hypervisor * Bit[15] Never implemented - * - * Always stay away from hypervisor mode. */ - access_type = ETM_EXLEVEL_NS_HYP; - - if (config->mode & ETM_MODE_EXCL_KERN) - access_type |= ETM_EXLEVEL_NS_OS; + if (!is_kernel_in_hyp_mode()) { + /* Stay away from hypervisor mode for non-VHE */ + access_type = ETM_EXLEVEL_NS_HYP; + if (config->mode & ETM_MODE_EXCL_KERN) + access_type |= ETM_EXLEVEL_NS_OS; + } else if (config->mode & ETM_MODE_EXCL_KERN) { + access_type = ETM_EXLEVEL_NS_HYP; + } if (config->mode & ETM_MODE_EXCL_USER) access_type |= ETM_EXLEVEL_NS_APP; + return access_type; +} + +static u64 etm4_get_access_type(struct etmv4_config *config) +{ + u64 access_type = etm4_get_ns_access_type(config); + /* * EXLEVEL_S, bits[11:8], don't trace anything happening * in secure state. 
@@ -890,20 +910,10 @@ void etm4_config_trace_mode(struct etmv4_config *config) addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; /* clear default config */ - addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); + addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS | + ETM_EXLEVEL_NS_HYP); - /* - * EXLEVEL_NS, bits[15:12] - * The Exception levels are: - * Bit[12] Exception level 0 - Application - * Bit[13] Exception level 1 - OS - * Bit[14] Exception level 2 - Hypervisor - * Bit[15] Never implemented - */ - if (mode & ETM_MODE_EXCL_KERN) - addr_acc |= ETM_EXLEVEL_NS_OS; - else - addr_acc |= ETM_EXLEVEL_NS_APP; + addr_acc |= etm4_get_ns_access_type(config); config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 0f648e030f9b505598257659243c31e7b51aa1f1..09e66d5a2b3ea40019cca3650b57fb1e910e9c6d 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -421,10 +421,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev, case TMC_MEM_INTF_WIDTH_32BITS: case TMC_MEM_INTF_WIDTH_64BITS: case TMC_MEM_INTF_WIDTH_128BITS: - mask = GENMASK(31, 5); + mask = GENMASK(31, 4); break; case TMC_MEM_INTF_WIDTH_256BITS: - mask = GENMASK(31, 6); + mask = GENMASK(31, 5); break; } diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 9e013bd40f7d6276dfbdcece0db99f8544aa5a57..515c91324b806f86c155d7fdb4497e9d9586ec0f 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -148,12 +148,14 @@ static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) { int ret; - if (!csdev->enable) { - if (sink_ops(csdev)->enable) { - ret = sink_ops(csdev)->enable(csdev, mode); - if (ret) - return ret; - } + /* + * We need to make sure the "new" session is compatible with the + * existing "mode" of operation. + */ + if (sink_ops(csdev)->enable) { + ret = sink_ops(csdev)->enable(csdev, mode); + if (ret) + return ret; csdev->enable = true; } @@ -374,8 +376,14 @@ int coresight_enable_path(struct list_head *path, u32 mode) switch (type) { case CORESIGHT_DEV_TYPE_SINK: ret = coresight_enable_sink(csdev, mode); + /* + * Sink is the first component turned on. If we + * failed to enable the sink, there are no components + * that need disabling. Disabling the path here + * would mean we could disrupt an existing session. + */ if (ret) - goto err; + goto out; break; case CORESIGHT_DEV_TYPE_SOURCE: /* sources are enabled from either sysFS or Perf */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index b8353ad08fd72f3c811c69903bf0c675d80445d0..f9eca47b7656554ff41956327ac1abea7ee8ec31 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -397,12 +397,13 @@ config I2C_BCM_KONA If you do not need KONA I2C interface, say N. config I2C_BRCMSTB - tristate "BRCM Settop I2C controller" - depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + tristate "BRCM Settop/DSL I2C controller" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \ + COMPILE_TEST default y help If you say yes to this option, support will be included for the - I2C interface on the Broadcom Settop SoCs. + I2C interface on the Broadcom Settop/DSL SoCs. If you do not need I2C interface, say N. 
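For reference, the exception-level filtering policy that the etm4x hunks above introduce in etm4_get_ns_access_type() can be reduced to a small standalone sketch. This is only an illustration of the selection logic, not driver code: the ETM_MODE_* and ETM_EXLEVEL_NS_* constants below are assumed stand-ins, defined here only so the fragment is self-contained, and kernel_runs_at_el2 stands in for the kernel's is_kernel_in_hyp_mode() check. The bit layout follows the comment kept in the patch (bit 12 = EL0/application, bit 13 = EL1/OS, bit 14 = EL2/hypervisor).

#include <stdbool.h>
#include <stdint.h>

/* Assumed stand-in values; the real driver defines these in coresight-etm4x.h. */
#define ETM_MODE_EXCL_KERN	(1u << 30)
#define ETM_MODE_EXCL_USER	(1u << 31)
#define ETM_EXLEVEL_NS_APP	(1u << 12)	/* EL0, application */
#define ETM_EXLEVEL_NS_OS	(1u << 13)	/* EL1, OS */
#define ETM_EXLEVEL_NS_HYP	(1u << 14)	/* EL2, hypervisor */

/* Each bit set in the result excludes that non-secure exception level from tracing. */
static uint64_t ns_access_type(uint32_t mode, bool kernel_runs_at_el2)
{
	uint64_t access_type = 0;

	if (!kernel_runs_at_el2) {
		/* non-VHE: never trace EL2; excluding the kernel means excluding EL1 */
		access_type = ETM_EXLEVEL_NS_HYP;
		if (mode & ETM_MODE_EXCL_KERN)
			access_type |= ETM_EXLEVEL_NS_OS;
	} else if (mode & ETM_MODE_EXCL_KERN) {
		/* VHE: the kernel runs at EL2, so excluding it means excluding EL2 */
		access_type = ETM_EXLEVEL_NS_HYP;
	}

	if (mode & ETM_MODE_EXCL_USER)
		access_type |= ETM_EXLEVEL_NS_APP;

	return access_type;
}

With this factored out, etm4_config_trace_mode() in the hunk above reuses the same helper when rebuilding the address-comparator access type, so the ViewInst filter and the address-range filter are derived from one place instead of two open-coded copies.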
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index c811af4c8d817bcf353068bf2e3f95f56953155b..e420b41a34bae39ac8ef3f8a7725efe19d9ac71a 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -212,6 +212,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data) if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) { /* We got a NACKIE */ readb(riic->base + RIIC_ICDRR); /* dummy read */ + riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2); riic->err = -ENXIO; } else if (riic->bytes_left) { return IRQ_NONE; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index c3888822add1a341c51e8176c5594047251a8bba..b6254ce9ab3b3634f606d0f5bedd2286b654872b 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -125,7 +125,7 @@ #define BMC150_ACCEL_SLEEP_1_SEC 0x0F #define BMC150_ACCEL_REG_TEMP 0x08 -#define BMC150_ACCEL_TEMP_CENTER_VAL 24 +#define BMC150_ACCEL_TEMP_CENTER_VAL 23 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2)) #define BMC150_AUTO_SUSPEND_DELAY_MS 2000 diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 9704090b79084f3206703c248aed3d774831744f..cd6dbe95125b3f334db88625dbe3f3f716777b84 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -817,10 +817,10 @@ static int ad799x_probe(struct i2c_client *client, ret = ad799x_write_config(st, st->chip_config->default_config); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; ret = ad799x_read_config(st); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; st->config = ret; ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c index 3854d201a5d6c3a05a1a118c03cee4e815314e20..68dd0be1ac0763977e85625756b5e7361c97dd73 100644 --- a/drivers/iio/dac/mcp4922.c +++ b/drivers/iio/dac/mcp4922.c @@ -94,17 +94,22 @@ static int mcp4922_write_raw(struct iio_dev *indio_dev, long mask) { struct mcp4922_state *state = iio_priv(indio_dev); + int ret; if (val2 != 0) return -EINVAL; switch (mask) { case IIO_CHAN_INFO_RAW: - if (val > GENMASK(chan->scan_type.realbits-1, 0)) + if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0)) return -EINVAL; val <<= chan->scan_type.shift; - state->value[chan->channel] = val; - return mcp4922_spi_write(state, chan->channel, val); + + ret = mcp4922_spi_write(state, chan->channel, val); + if (!ret) + state->value[chan->channel] = val; + return ret; + default: return -EINVAL; } diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 12898424d8382f5953bcf2618e31f96c1874c56b..6f975538996cdc0936ffd49ab9a77adc7f499bf9 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -266,8 +266,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2) struct adis16480 *st = iio_priv(indio_dev); unsigned int t; + if (val < 0 || val2 < 0) + return -EINVAL; + t = val * 1000 + val2 / 1000; - if (t <= 0) + if (t == 0) return -EINVAL; t = 2460000 / t; diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h index 9605ab251571645b9fc81ef07688b41b8d13db5b..7b4963df780d1bd77a20bda9e668312f96c58001 100644 --- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h +++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h @@ -18,7 +18,7 @@ #include #define ST_ASM330LHH_REVISION "2.0.1" -#define ST_ASM330LHH_PATCH "1" +#define ST_ASM330LHH_PATCH "2" #define 
ST_ASM330LHH_VERSION "v" \ ST_ASM330LHH_REVISION \ @@ -220,7 +220,10 @@ struct st_asm330lhh_hw { u8 enable_mask; s64 ts_offset; + u32 hw_val; + u32 hw_val_old; s64 hw_ts; + s64 hw_ts_high; s64 delta_ts; s64 ts; s64 tsample; diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c index 734d735e2aed819b9f8052ba6fd86be3dafb48fc..c224427d0153d65fc64c74c8ddda28f26bf9a4df 100644 --- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c +++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c @@ -64,6 +64,8 @@ static inline int st_asm330lhh_reset_hwts(struct st_asm330lhh_hw *hw) hw->ts_offset = hw->ts; hw->hw_ts_old = 0ull; hw->tsample = 0ull; + hw->hw_ts_high = 0ull; + hw->hw_val_old = 0ull; return hw->tf->write(hw->dev, ST_ASM330LHH_REG_TS2_ADDR, sizeof(data), &data); @@ -257,14 +259,12 @@ static int st_asm330lhh_read_fifo(struct st_asm330lhh_hw *hw) struct iio_dev *iio_dev; __le16 fifo_status; u16 fifo_depth; - u32 val; int ts_processed = 0; s64 hw_ts = 0ull, delta_hw_ts, cpu_timestamp; ts_irq = hw->ts - hw->delta_ts; - do - { + do { err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_FIFO_DIFFL_ADDR, sizeof(fifo_status), (u8 *)&fifo_status); if (err < 0) @@ -289,8 +289,14 @@ static int st_asm330lhh_read_fifo(struct st_asm330lhh_hw *hw) tag = buf[i] >> 3; if (tag == ST_ASM330LHH_TS_TAG) { - val = get_unaligned_le32(ptr); - hw->hw_ts = val * ST_ASM330LHH_TS_DELTA_NS; + hw->hw_val = get_unaligned_le32(ptr); + + /* check for timer rollover */ + if (hw->hw_val < hw->hw_val_old) + hw->hw_ts_high++; + hw->hw_ts = + (hw->hw_val + (hw->hw_ts_high << 32)) + * ST_ASM330LHH_TS_DELTA_NS; ts_delta_hw_ts = hw->hw_ts - hw->hw_ts_old; hw_ts += ts_delta_hw_ts; ts_delta_offs = @@ -301,6 +307,7 @@ static int st_asm330lhh_read_fifo(struct st_asm330lhh_hw *hw) ts_irq += (hw->hw_ts + ts_delta_offs); hw->hw_ts_old = hw->hw_ts; + hw->hw_val_old = hw->hw_val; ts_processed++; if (!hw->tsample) diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 78c9b3a6453ae57286923a42d9284774b2a696c0..be55477de2acc764cadf1023248bea63a5b5049f 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -695,6 +695,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) struct iio_dev *iio = _iio; struct opt3001 *opt = iio_priv(iio); int ret; + bool wake_result_ready_queue = false; if (!opt->ok_to_ignore_lock) mutex_lock(&opt->lock); @@ -729,13 +730,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) } opt->result = ret; opt->result_ready = true; - wake_up(&opt->result_ready_queue); + wake_result_ready_queue = true; } out: if (!opt->ok_to_ignore_lock) mutex_unlock(&opt->lock); + if (wake_result_ready_queue) + wake_up(&opt->result_ready_queue); + return IRQ_HANDLED; } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 85d4ef319c905ee8112bc0f9100a5669a46f9e93..dcfbf326f45c9110c57da13fea7142bfa9116712 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2119,9 +2119,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, conn_id->cm_id.iw = NULL; cma_exch(conn_id, RDMA_CM_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + mutex_unlock(&listen_id->handler_mutex); cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); - goto out; + return ret; } mutex_unlock(&conn_id->handler_mutex); diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index ff12b8d176ced89fdfc3e427498dcd80a7150773..c4e7aa91498bfbab16ed740ad97c1f11daa492e9 100644 --- 
a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -102,12 +102,12 @@ static void ib_cq_poll_work(struct work_struct *work) completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) - queue_work(ib_comp_wq, &cq->work); + queue_work(cq->comp_wq, &cq->work); } static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) { - queue_work(ib_comp_wq, &cq->work); + queue_work(cq->comp_wq, &cq->work); } /** @@ -159,9 +159,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); break; case IB_POLL_WORKQUEUE: + case IB_POLL_UNBOUND_WORKQUEUE: cq->comp_handler = ib_cq_completion_workqueue; INIT_WORK(&cq->work, ib_cq_poll_work); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? + ib_comp_wq : ib_comp_unbound_wq; break; default: ret = -EINVAL; @@ -196,6 +199,7 @@ void ib_free_cq(struct ib_cq *cq) irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: + case IB_POLL_UNBOUND_WORKQUEUE: cancel_work_sync(&cq->work); break; default: diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 15f4bdf89fe19b89de08685db8654fdcf26e1cfc..4b947d5cafe284cd110fb303688630efdba62b94 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -59,6 +59,7 @@ struct ib_client_data { }; struct workqueue_struct *ib_comp_wq; +struct workqueue_struct *ib_comp_unbound_wq; struct workqueue_struct *ib_wq; EXPORT_SYMBOL_GPL(ib_wq); @@ -1005,10 +1006,19 @@ static int __init ib_core_init(void) goto err; } + ib_comp_unbound_wq = + alloc_workqueue("ib-comp-unb-wq", + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); + if (!ib_comp_unbound_wq) { + ret = -ENOMEM; + goto err_comp; + } + ret = class_register(&ib_class); if (ret) { pr_warn("Couldn't create InfiniBand device class\n"); - goto err_comp; + goto err_comp_unbound; } ret = ibnl_init(); @@ -1055,6 +1065,8 @@ static int __init ib_core_init(void) ibnl_cleanup(); err_sysfs: class_unregister(&ib_class); +err_comp_unbound: + destroy_workqueue(ib_comp_unbound_wq); err_comp: destroy_workqueue(ib_comp_wq); err: @@ -1071,6 +1083,7 @@ static void __exit ib_core_cleanup(void) addr_cleanup(); ibnl_cleanup(); class_unregister(&ib_class); + destroy_workqueue(ib_comp_unbound_wq); destroy_workqueue(ib_comp_wq); /* Make sure that any pending umem accounting work is done. 
*/ destroy_workqueue(ib_wq); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 25a28e7060725e6811c6c334049b94a053ceacce..a1f059a9c75191f42ff0aee2a4c9fcf06817ca17 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3163,7 +3163,7 @@ static int ib_mad_port_open(struct ib_device *device, } port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, - IB_POLL_WORKQUEUE); + IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index e5752352e0fb155d2a40dcda211638634e09378e..605d50ad123ccd5602e8b206019fddb8773200bc 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -490,7 +490,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); - kfree_skb(skb); return 0; } @@ -501,7 +500,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); - kfree_skb(skb); return 0; } diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 0c215353adb991347ad7a28a5af60b5a6b9dcb3e..2b1dd60a29fa32a4200189f13de67a84f5419823 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -264,13 +264,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, struct sk_buff *skb) { int err; - struct fw_ri_tpte tpt; + struct fw_ri_tpte *tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; + tpt = kmalloc(sizeof(*tpt), GFP_KERNEL); + if (!tpt) + return -ENOMEM; + stag_state = stag_state > 0; stag_idx = (*stag) >> 8; @@ -280,6 +284,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, mutex_lock(&rdev->stats.lock); rdev->stats.stag.fail++; mutex_unlock(&rdev->stats.lock); + kfree(tpt); return -ENOMEM; } mutex_lock(&rdev->stats.lock); @@ -294,28 +299,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, /* write TPT entry */ if (reset_tpt_entry) - memset(&tpt, 0, sizeof(tpt)); + memset(tpt, 0, sizeof(*tpt)); else { - tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | + tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) | FW_RI_TPTE_STAGSTATE_V(stag_state) | FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid)); - tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | + tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) | FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| FW_RI_TPTE_PS_V(page_size)); - tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( + tpt->nosnoop_pbladdr = !pbl_size ? 
0 : cpu_to_be32( FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3)); - tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); - tpt.va_hi = cpu_to_be32((u32)(to >> 32)); - tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); - tpt.dca_mwbcnt_pstag = cpu_to_be32(0); - tpt.len_hi = cpu_to_be32((u32)(len >> 32)); + tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); + tpt->va_hi = cpu_to_be32((u32)(to >> 32)); + tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); + tpt->dca_mwbcnt_pstag = cpu_to_be32(0); + tpt->len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->lldi.vr->stag.start >> 5), - sizeof(tpt), &tpt, skb); + sizeof(*tpt), tpt, skb); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); @@ -323,6 +328,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } + kfree(tpt); return err; } diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 9487c9bb892087d4dd3b994576fa2eaec2db7d0e..908d4dd015628df42ff9794dae7336c298285bdf 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -2016,7 +2016,7 @@ struct opa_port_status_req { __be32 vl_select_mask; }; -#define VL_MASK_ALL 0x000080ff +#define VL_MASK_ALL 0x00000000000080ffUL struct opa_port_status_rsp { __u8 port_num; @@ -2315,15 +2315,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, } static void a0_portstatus(struct hfi1_pportdata *ppd, - struct opa_port_status_rsp *rsp, u32 vl_select_mask) + struct opa_port_status_rsp *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2347,12 +2346,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, (struct opa_port_status_req *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_port_status_rsp *rsp; - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); + unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl; size_t response_data_size; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u8 port_num = req->port_num; - u8 num_vls = hweight32(vl_select_mask); + u8 num_vls = hweight64(vl_select_mask); struct _vls_pctrs *vlinfo; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -2386,7 +2385,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, hfi1_read_link_quality(dd, &rsp->link_quality_indicator); - rsp->vl_select_mask = cpu_to_be32(vl_select_mask); + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, @@ -2449,8 +2448,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. 
*/ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); @@ -2487,7 +2485,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, vfi++; } - a0_portstatus(ppd, rsp, vl_select_mask); + a0_portstatus(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -2534,16 +2532,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, - u32 vl_select_mask) +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2599,7 +2595,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, u64 port_mask; u8 port_num; unsigned long vl; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; @@ -2668,8 +2664,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = @@ -2712,7 +2707,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(ppd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -2807,7 +2802,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct _vls_ectrs *vlinfo; unsigned long vl; u64 port_mask, tmp; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; req = (struct opa_port_error_counters64_msg *)pmp->data; @@ -2866,8 +2861,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, @@ -3077,7 +3071,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u64 portn = be64_to_cpu(req->port_select_mask[3]); u32 counter_select = be32_to_cpu(req->counter_select_mask); - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl; if ((nports != 1) || (portn != 1 << port)) { @@ -3169,8 +3163,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_UNCORRECTABLE_ERRORS) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, 
idx_from_vl(vl), 0); diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 717626046ee500c772f4e18dbf64105dbbdb9312..fbe662602f0a8ca950457a860f3b537d9ea051c1 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -377,7 +377,9 @@ int pcie_speeds(struct hfi1_devdata *dd) /* * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed */ - if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) { + if (parent && + (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT || + dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) { dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n"); dd->link_gen3_capable = 0; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 85637696f6e9600a638e78a48aa9272d18c08883..282a726351c816d342f71aca1facc2a2f60afcbc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1652,7 +1652,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev, unsigned long flags; rtnl_lock(); - for_each_netdev_rcu(&init_net, ip_dev) { + for_each_netdev(&init_net, ip_dev) { if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) && (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index ded76c101dde3d74a36c85fb9a71f90050969b55..834b06aacc2bfcb8f4003c232007a991dba29f14 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -989,7 +989,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) goto err_free_dev; } - if (mthca_cmd_init(mdev)) { + err = mthca_cmd_init(mdev); + if (err) { mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index df15b6d7b645e2afaa730d100c9e32873a1f9232..b03a6206d9be7e690648e547d9567eab34dbc515 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -250,6 +250,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE && pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) { + /* read retries of partial data may restart from + * read response first or response only. 
+ */ + if ((pkt->psn == wqe->first_psn && + pkt->opcode == + IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) || + (wqe->first_psn == wqe->last_psn && + pkt->opcode == + IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY)) + break; + return COMPST_ERROR; } break; @@ -486,11 +497,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - qp->comp.opcode = -1; - - if (pkt) { - if (psn_compare(pkt->psn, qp->comp.psn) >= 0) - qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; + if (pkt && wqe->state == wqe_state_pending) { + if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { + qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; + qp->comp.opcode = -1; + } if (qp->req.wait_psn) { qp->req.wait_psn = 0; diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 0f9fe2ca2a91a252cbed7bb86843c6b33d158c32..6fb771290c5661b4651a8c602377b13b2ac89686 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -72,9 +72,6 @@ static void req_retry(struct rxe_qp *qp) int npsn; int first = 1; - wqe = queue_head(qp->sq.queue); - npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK; - qp->req.wqe_index = consumer_index(qp->sq.queue); qp->req.psn = qp->comp.psn; qp->req.opcode = -1; @@ -106,11 +103,17 @@ static void req_retry(struct rxe_qp *qp) if (first) { first = 0; - if (mask & WR_WRITE_OR_SEND_MASK) + if (mask & WR_WRITE_OR_SEND_MASK) { + npsn = (qp->comp.psn - wqe->first_psn) & + BTH_PSN_MASK; retry_first_write_send(qp, wqe, mask, npsn); + } - if (mask & WR_READ_MASK) + if (mask & WR_READ_MASK) { + npsn = (wqe->dma.length - wqe->dma.resid) / + qp->mtu; wqe->iova += npsn * qp->mtu; + } } wqe->state = wqe_state_posted; @@ -439,7 +442,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (pkt->mask & RXE_RETH_MASK) { reth_set_rkey(pkt, ibwr->wr.rdma.rkey); reth_set_va(pkt, wqe->iova); - reth_set_len(pkt, wqe->dma.length); + reth_set_len(pkt, wqe->dma.resid); } if (pkt->mask & RXE_IMMDT_MASK) diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 81ae2e30dd12540cdfeb41538b90676c81ee590c..27a7e4406f34306835dadc7e3e9aa7af94b44d2c 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -590,13 +590,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) ib_conn->post_recv_buf_count--; } -static inline void +static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) { - if (likely(rkey == desc->rsc.mr->rkey)) + if (likely(rkey == desc->rsc.mr->rkey)) { desc->rsc.mr_valid = 0; - else if (likely(rkey == desc->pi_ctx->sig_mr->rkey)) + } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) { desc->pi_ctx->sig_mr_valid = 0; + } else { + iser_err("Bogus remote invalidation for rkey %#x\n", rkey); + return -EINVAL; + } + + return 0; } static int @@ -624,12 +630,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn, if (iser_task->dir[ISER_DIR_IN]) { desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h; - iser_inv_desc(desc, rkey); + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; } if (iser_task->dir[ISER_DIR_OUT]) { desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h; - iser_inv_desc(desc, rkey); + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; } } else { iser_err("failed to get task for itt=%d\n", hdr->itt); diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c index 
fcc6c3368182b9c376c01a4506d60a276dbceb1a..ea3f0f5eb534667b6ab15cc88c3acaf6a39121f0 100644 --- a/drivers/input/ff-memless.c +++ b/drivers/input/ff-memless.c @@ -501,6 +501,15 @@ static void ml_ff_destroy(struct ff_device *ff) { struct ml_device *ml = ff->private; + /* + * Even though we stop all playing effects when tearing down + * an input device (via input_device_flush() that calls into + * input_ff_flush() that stops and erases all effects), we + * do not actually stop the timer, and therefore we should + * do it here. + */ + del_timer_sync(&ml->timer); + kfree(ml->private); } diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c index bb863e062b03b3cf66317c8899401478e2f06930..eaf5ecc431c90f6dfbdeb79a2ef2e5e490c35b1a 100644 --- a/drivers/input/misc/da9063_onkey.c +++ b/drivers/input/misc/da9063_onkey.c @@ -247,10 +247,7 @@ static int da9063_onkey_probe(struct platform_device *pdev) onkey->input->phys = onkey->phys; onkey->input->dev.parent = &pdev->dev; - if (onkey->key_power) - input_set_capability(onkey->input, EV_KEY, KEY_POWER); - - input_set_capability(onkey->input, EV_KEY, KEY_SLEEP); + input_set_capability(onkey->input, EV_KEY, KEY_POWER); INIT_DELAYED_WORK(&onkey->work, da9063_poll_on); diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index 2e934aef3d2a8b675635fcdf9ccd4af807c4ef87..b669cdc8c8c66b53646919307f0744c1067073e1 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -364,7 +364,7 @@ static const struct vb2_ops rmi_f54_queue_ops = { static const struct vb2_queue rmi_f54_queue = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ, - .buf_struct_size = sizeof(struct vb2_buffer), + .buf_struct_size = sizeof(struct vb2_v4l2_buffer), .ops = &rmi_f54_queue_ops, .mem_ops = &vb2_vmalloc_memops, .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, @@ -617,7 +617,7 @@ static int rmi_f54_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; - drv->set_irq_bits(fn->rmi_dev, fn->irq_mask); + drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask); return 0; } @@ -744,6 +744,7 @@ static void rmi_f54_remove(struct rmi_function *fn) video_unregister_device(&f54->vdev); v4l2_device_unregister(&f54->v4l2); + destroy_workqueue(f54->workqueue); } struct rmi_function_handler rmi_f54_handler = { diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index f502c8488be86361592187acdffdecb164c4dfbd..867772878c0c8fc1a1588a3bf9733a35da415f4a 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -504,20 +504,33 @@ static int __maybe_unused silead_ts_suspend(struct device *dev) static int __maybe_unused silead_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); + bool second_try = false; int error, status; silead_ts_set_power(client, SILEAD_POWER_ON); + retry: error = silead_ts_reset(client); if (error) return error; + if (second_try) { + error = silead_ts_load_fw(client); + if (error) + return error; + } + error = silead_ts_startup(client); if (error) return error; status = silead_ts_get_status(client); if (status != SILEAD_STATUS_OK) { + if (!second_try) { + second_try = true; + dev_dbg(dev, "Reloading firmware after unsuccessful resume\n"); + goto retry; + } dev_err(dev, "Resume error, status: 0x%02x\n", status); return -ENODEV; } diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 
e943678ce54cd48c64240d63e964408e670d4417..f1c574d6be17f1b42dd3b0c0a9adda27fa247017 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -203,6 +203,7 @@ static int st1232_ts_probe(struct i2c_client *client, input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); __set_bit(EV_SYN, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __set_bit(EV_ABS, input_dev->evbit); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c1233d0288a035d2ef69a662f7eb04f13c5012c0..e81acb2b6ee7d27e6e68de2622ce952c74dbde9c 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1321,18 +1321,21 @@ static void domain_flush_devices(struct protection_domain *domain) * another level increases the size of the address space by 9 bits to a size up * to 64 bits. */ -static bool increase_address_space(struct protection_domain *domain, +static void increase_address_space(struct protection_domain *domain, gfp_t gfp) { + unsigned long flags; u64 *pte; - if (domain->mode == PAGE_MODE_6_LEVEL) + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) /* address space already 64 bit large */ - return false; + goto out; pte = (void *)get_zeroed_page(gfp); if (!pte) - return false; + goto out; *pte = PM_LEVEL_PDE(domain->mode, virt_to_phys(domain->pt_root)); @@ -1340,7 +1343,10 @@ static bool increase_address_space(struct protection_domain *domain, domain->mode += 1; domain->updated = true; - return true; +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return; } static u64 *alloc_pte(struct protection_domain *domain, @@ -2589,7 +2595,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, bus_addr = address + s->dma_address + (j << PAGE_SHIFT); phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); + ret = iommu_map_page(domain, bus_addr, phys_addr, + PAGE_SIZE, prot, + GFP_ATOMIC | __GFP_NOWARN); if (ret) goto out_unmap; diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c index 180edf32dfc1e5dadb036476385d193b48db43ff..05ed3117a093710a95c934876f37119e19693663 100644 --- a/drivers/iommu/msm_dma_iommu_mapping.c +++ b/drivers/iommu/msm_dma_iommu_mapping.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,7 +41,7 @@ struct msm_iommu_map { struct list_head lnode; struct rb_node node; struct device *dev; - struct scatterlist sgl; + struct scatterlist *sgl; unsigned int nents; enum dma_data_direction dir; struct msm_iommu_meta *meta; @@ -148,6 +148,22 @@ static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf) static void msm_iommu_meta_put(struct msm_iommu_meta *meta); +static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents) +{ + struct scatterlist *next, *s; + int i; + struct sg_table table; + + if (sg_alloc_table(&table, nents, GFP_KERNEL)) + return NULL; + next = table.sgl; + for_each_sg(sg, s, nents, i) { + *next = *s; + next = sg_next(next); + } + return table.sgl; +} + static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_buf *dma_buf, @@ -191,20 +207,25 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg, } ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs); - if (ret != nents) { + if (!ret) { + kfree(iommu_map); + goto out_unlock; + } + + iommu_map->sgl = clone_sgl(sg, nents); + if (!iommu_map->sgl) { kfree(iommu_map); + ret = -ENOMEM; goto out_unlock; } + iommu_map->nents = nents; + iommu_map->dev = dev; kref_init(&iommu_map->ref); if (late_unmap) kref_get(&iommu_map->ref); iommu_map->meta = iommu_meta; - iommu_map->sgl.dma_address = sg->dma_address; - iommu_map->sgl.dma_length = sg->dma_length; - iommu_map->dev = dev; iommu_map->dir = dir; - iommu_map->nents = nents; iommu_map->map_attrs = attrs; iommu_map->buf_start_addr = sg_phys(sg); msm_iommu_add(iommu_meta, iommu_map); @@ -214,8 +235,8 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg, dir == iommu_map->dir && attrs == iommu_map->map_attrs && sg_phys(sg) == iommu_map->buf_start_addr) { - sg->dma_address = iommu_map->sgl.dma_address; - sg->dma_length = iommu_map->sgl.dma_length; + sg->dma_address = iommu_map->sgl->dma_address; + sg->dma_length = iommu_map->sgl->dma_length; kref_get(&iommu_map->ref); if (is_device_dma_coherent(dev)) @@ -315,9 +336,14 @@ static void msm_iommu_map_release(struct kref *kref) { struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map, ref); + struct sg_table table; + table.nents = table.orig_nents = map->nents; + table.sgl = map->sgl; list_del(&map->lnode); - dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir); + + dma_unmap_sg(map->dev, map->sgl, map->nents, map->dir); + sg_free_table(&table); kfree(map); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 83ca754250fb7b0c4539393c7fc9f4d9021371cc..0c0cd2768d6e96478122c412ea67a9fb29c07238 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1519,14 +1519,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct its_device *its_dev = irq_data_get_irq_chip_data(d); int i; + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + for (i = 0; i < nr_irqs; i++) { struct irq_data *data = irq_domain_get_irq_data(domain, virq + i); - u32 event = its_get_event_id(data); - - /* Mark interrupt index as unused */ - clear_bit(event, its_dev->event_map.lpi_map); - /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } diff --git 
a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 6a2df3297e77c92e4ca47d460ad41c9363425842..691ad069444d49beb9185ce6ee73ab6af046b52b 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -687,6 +687,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos if (!cdev->ap.applid) return -ENODEV; + if (count < CAPIMSG_BASELEN) + return -EINVAL; + skb = alloc_skb(count, GFP_USER); if (!skb) return -ENOMEM; @@ -697,7 +700,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos } mlen = CAPIMSG_LEN(skb->data); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { + if (count < CAPI_DATA_B3_REQ_LEN || + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { kfree_skb(skb); return -EINVAL; } @@ -710,6 +714,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) { + if (count < CAPI_DISCONNECT_B3_RESP_LEN) { + kfree_skb(skb); + return -EINVAL; + } mutex_lock(&cdev->lock); capincci_free(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->lock); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index d7c986fb0b3b427b83464df44c04adf163b220d6..d204e530fcaaf49c3e79c3fddd1c545e35b9eb2d 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -766,6 +766,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; + if (!capable(CAP_NET_RAW)) + return -EPERM; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 592f597d895181d0ce1e5dd697a7d37ab09f2ef3..8261afbbafb0588b703ead3266aae74f1572798a 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -1180,8 +1180,7 @@ static int ctrl_teimanager(struct manager *mgr, void *arg) { /* currently we only have one option */ - int *val = (int *)arg; - int ret = 0; + unsigned int *val = (unsigned int *)arg; switch (val[0]) { case IMCLEAR_L2: @@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg) test_and_clear_bit(OPTION_L1_HOLD, &mgr->options); break; default: - ret = -EINVAL; + return -EINVAL; } - return ret; + return 0; } /* This function does create a L2 for fixed TEI in NT Mode */ diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index b75333803a63776608acf7e5810c118123c6e166..9f5e4d04efad5f1cd8354ffa6dede18da3d49689 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip) { const struct firmware *fw = chip->fw; - if (fw->size > LP5562_PROGRAM_LENGTH) { + /* + * the firmware is encoded in ascii hex character, with 2 chars + * per byte + */ + if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) { dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n", fw->size); return; diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index ad6223e8834043ba710c9d1bd5c41ae2a156dfbe..3d310dd60a0be37f1e419461bf78cae952966e06 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -22,14 +22,6 @@ #define VERSION "1.0" -#define DEBUG - -#ifdef DEBUG -#define DBG(args...) printk(args) -#else -#define DBG(args...) 
do { } while(0) -#endif - /* If the cache is older than 800ms we'll refetch it */ #define MAX_AGE msecs_to_jiffies(800) @@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id, buf[i+2] = data[3]; buf[i+3] = data[2]; } -#ifdef DEBUG - DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id); - for (i = 0; i < len; ++i) - DBG(" %x", buf[i]); - DBG("\n"); -#endif + printk(KERN_DEBUG "sat %d partition %x:", sat_id, id); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); if (size) *size = len; return (struct smu_sdbp_header *) buf; @@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat) if (err < 0) return err; sat->last_read = jiffies; + #ifdef LOTSA_DEBUG { int i; - DBG(KERN_DEBUG "wf_sat_get: data is"); - for (i = 0; i < 16; ++i) - DBG(" %.2x", sat->cache[i]); - DBG("\n"); + printk(KERN_DEBUG "wf_sat_get: data is"); + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET, + 16, 1, sat->cache, 16, false); } #endif return 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f37c2deb3164247983c5fa0064215a0b4fe4512a..2dc2bbb0e9b90bfd6c811956c54de68f9a833d57 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -904,6 +904,7 @@ static void cached_dev_detach_finish(struct work_struct *w) bch_write_bdev_super(dc, &cl); closure_sync(&cl); + calc_cached_dev_sectors(dc->disk.c); bcache_device_detach(&dc->disk); list_move(&dc->list, &uncached_devices); diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c index 03af174485d3066c62659f252c5306fc86fa3f68..fa2432a89bacea54c1826a8a26528d380aeeb91e 100644 --- a/drivers/md/dm-bio-prison.c +++ b/drivers/md/dm-bio-prison.c @@ -32,7 +32,7 @@ static struct kmem_cache *_cell_cache; */ struct dm_bio_prison *dm_bio_prison_create(void) { - struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL); + struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL); if (!prison) return NULL; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index c837defb5e4dd0d42fa2b7c74883241c6b6f5215..48bb5a879e6f40fedcb35a44502cd2661c6936ac 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -32,7 +32,8 @@ #define DM_BUFIO_MEMORY_PERCENT 2 #define DM_BUFIO_VMALLOC_PERCENT 25 -#define DM_BUFIO_WRITEBACK_PERCENT 75 +#define DM_BUFIO_WRITEBACK_RATIO 3 +#define DM_BUFIO_LOW_WATERMARK_RATIO 16 /* * Check buffer ages in this interval (seconds) @@ -137,10 +138,12 @@ enum data_mode { struct dm_buffer { struct rb_node node; struct list_head lru_list; + struct list_head global_list; sector_t block; void *data; enum data_mode data_mode; unsigned char list_mode; /* LIST_* */ + unsigned accessed; unsigned hold_count; int read_error; int write_error; @@ -209,7 +212,11 @@ static unsigned long dm_bufio_cache_size; */ static unsigned long dm_bufio_cache_size_latch; -static DEFINE_SPINLOCK(param_spinlock); +static DEFINE_SPINLOCK(global_spinlock); + +static LIST_HEAD(global_queue); + +static unsigned long global_num = 0; /* * Buffers are freed after this timeout @@ -225,11 +232,6 @@ static unsigned long dm_bufio_current_allocated; /*----------------------------------------------------------------*/ -/* - * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count - */ -static unsigned long dm_bufio_cache_size_per_client; - /* * The current number of clients. 
*/ @@ -241,11 +243,15 @@ static int dm_bufio_client_count; static LIST_HEAD(dm_bufio_all_clients); /* - * This mutex protects dm_bufio_cache_size_latch, - * dm_bufio_cache_size_per_client and dm_bufio_client_count + * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count */ static DEFINE_MUTEX(dm_bufio_clients_lock); +static struct workqueue_struct *dm_bufio_wq; +static struct delayed_work dm_bufio_cleanup_old_work; +static struct work_struct dm_bufio_replacement_work; + + #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING static void buffer_record_stack(struct dm_buffer *b) { @@ -306,15 +312,23 @@ static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) /*----------------------------------------------------------------*/ -static void adjust_total_allocated(enum data_mode data_mode, long diff) +static void adjust_total_allocated(struct dm_buffer *b, bool unlink) { + enum data_mode data_mode; + long diff; + static unsigned long * const class_ptr[DATA_MODE_LIMIT] = { &dm_bufio_allocated_kmem_cache, &dm_bufio_allocated_get_free_pages, &dm_bufio_allocated_vmalloc, }; - spin_lock(&param_spinlock); + data_mode = b->data_mode; + diff = (long)b->c->block_size; + if (unlink) + diff = -diff; + + spin_lock(&global_spinlock); *class_ptr[data_mode] += diff; @@ -323,7 +337,19 @@ static void adjust_total_allocated(enum data_mode data_mode, long diff) if (dm_bufio_current_allocated > dm_bufio_peak_allocated) dm_bufio_peak_allocated = dm_bufio_current_allocated; - spin_unlock(&param_spinlock); + b->accessed = 1; + + if (!unlink) { + list_add(&b->global_list, &global_queue); + global_num++; + if (dm_bufio_current_allocated > dm_bufio_cache_size) + queue_work(dm_bufio_wq, &dm_bufio_replacement_work); + } else { + list_del(&b->global_list); + global_num--; + } + + spin_unlock(&global_spinlock); } /* @@ -344,9 +370,6 @@ static void __cache_size_refresh(void) dm_bufio_default_cache_size); dm_bufio_cache_size_latch = dm_bufio_default_cache_size; } - - dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch / - (dm_bufio_client_count ? : 1); } /* @@ -453,8 +476,6 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) return NULL; } - adjust_total_allocated(b->data_mode, (long)c->block_size); - #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING memset(&b->stack_trace, 0, sizeof(b->stack_trace)); #endif @@ -468,8 +489,6 @@ static void free_buffer(struct dm_buffer *b) { struct dm_bufio_client *c = b->c; - adjust_total_allocated(b->data_mode, -(long)c->block_size); - free_buffer_data(c, b->data, b->data_mode); kfree(b); } @@ -487,6 +506,8 @@ static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) list_add(&b->lru_list, &c->lru[dirty]); __insert(b->c, b); b->last_accessed = jiffies; + + adjust_total_allocated(b, false); } /* @@ -501,6 +522,8 @@ static void __unlink_buffer(struct dm_buffer *b) c->n_buffers[b->list_mode]--; __remove(b->c, b); list_del(&b->lru_list); + + adjust_total_allocated(b, true); } /* @@ -510,6 +533,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty) { struct dm_bufio_client *c = b->c; + b->accessed = 1; + BUG_ON(!c->n_buffers[b->list_mode]); c->n_buffers[b->list_mode]--; @@ -922,33 +947,6 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, } } -/* - * Get writeback threshold and buffer limit for a given client. 
- */ -static void __get_memory_limit(struct dm_bufio_client *c, - unsigned long *threshold_buffers, - unsigned long *limit_buffers) -{ - unsigned long buffers; - - if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { - if (mutex_trylock(&dm_bufio_clients_lock)) { - __cache_size_refresh(); - mutex_unlock(&dm_bufio_clients_lock); - } - } - - buffers = dm_bufio_cache_size_per_client >> - (c->sectors_per_block_bits + SECTOR_SHIFT); - - if (buffers < c->minimum_buffers) - buffers = c->minimum_buffers; - - *limit_buffers = buffers; - *threshold_buffers = mult_frac(buffers, - DM_BUFIO_WRITEBACK_PERCENT, 100); -} - /* * Check if we're over watermark. * If we are over threshold_buffers, start freeing buffers. @@ -957,23 +955,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, static void __check_watermark(struct dm_bufio_client *c, struct list_head *write_list) { - unsigned long threshold_buffers, limit_buffers; - - __get_memory_limit(c, &threshold_buffers, &limit_buffers); - - while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] > - limit_buffers) { - - struct dm_buffer *b = __get_unclaimed_buffer(c); - - if (!b) - return; - - __free_buffer_wake(b); - cond_resched(); - } - - if (c->n_buffers[LIST_DIRTY] > threshold_buffers) + if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO) __write_dirty_buffers_async(c, 1, write_list); } @@ -1814,6 +1796,74 @@ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) dm_bufio_unlock(c); } +static void do_global_cleanup(struct work_struct *w) +{ + struct dm_bufio_client *locked_client = NULL; + struct dm_bufio_client *current_client; + struct dm_buffer *b; + unsigned spinlock_hold_count; + unsigned long threshold = dm_bufio_cache_size - + dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; + unsigned long loops = global_num * 2; + + mutex_lock(&dm_bufio_clients_lock); + + while (1) { + cond_resched(); + + spin_lock(&global_spinlock); + if (unlikely(dm_bufio_current_allocated <= threshold)) + break; + + spinlock_hold_count = 0; +get_next: + if (!loops--) + break; + if (unlikely(list_empty(&global_queue))) + break; + b = list_entry(global_queue.prev, struct dm_buffer, global_list); + + if (b->accessed) { + b->accessed = 0; + list_move(&b->global_list, &global_queue); + if (likely(++spinlock_hold_count < 16)) + goto get_next; + spin_unlock(&global_spinlock); + continue; + } + + current_client = b->c; + if (unlikely(current_client != locked_client)) { + if (locked_client) + dm_bufio_unlock(locked_client); + + if (!dm_bufio_trylock(current_client)) { + spin_unlock(&global_spinlock); + dm_bufio_lock(current_client); + locked_client = current_client; + continue; + } + + locked_client = current_client; + } + + spin_unlock(&global_spinlock); + + if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) { + spin_lock(&global_spinlock); + list_move(&b->global_list, &global_queue); + spin_unlock(&global_spinlock); + } + } + + spin_unlock(&global_spinlock); + + if (locked_client) + dm_bufio_unlock(locked_client); + + mutex_unlock(&dm_bufio_clients_lock); +} + static void cleanup_old_buffers(void) { unsigned long max_age_hz = get_max_age_hz(); @@ -1829,14 +1879,11 @@ static void cleanup_old_buffers(void) mutex_unlock(&dm_bufio_clients_lock); } -static struct workqueue_struct *dm_bufio_wq; -static struct delayed_work dm_bufio_work; - static void work_fn(struct work_struct *w) { cleanup_old_buffers(); - queue_delayed_work(dm_bufio_wq, &dm_bufio_work, + queue_delayed_work(dm_bufio_wq, 
&dm_bufio_cleanup_old_work, DM_BUFIO_WORK_TIMER_SECS * HZ); } @@ -1881,8 +1928,9 @@ static int __init dm_bufio_init(void) if (!dm_bufio_wq) return -ENOMEM; - INIT_DELAYED_WORK(&dm_bufio_work, work_fn); - queue_delayed_work(dm_bufio_wq, &dm_bufio_work, + INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn); + INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup); + queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, DM_BUFIO_WORK_TIMER_SECS * HZ); return 0; @@ -1896,7 +1944,8 @@ static void __exit dm_bufio_exit(void) int bug = 0; int i; - cancel_delayed_work_sync(&dm_bufio_work); + cancel_delayed_work_sync(&dm_bufio_cleanup_old_work); + flush_workqueue(dm_bufio_wq); destroy_workqueue(dm_bufio_wq); for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ee6045d6c0bb3717baca5317b3695b2a246b9f2d..201d90f5d1b3b34be0fe47a4d55768bd2a35a3b2 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -50,7 +50,7 @@ struct dm_io_client *dm_io_client_create(void) struct dm_io_client *client; unsigned min_ios = dm_get_reserved_bio_based_ios(); - client = kmalloc(sizeof(*client), GFP_KERNEL); + client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return ERR_PTR(-ENOMEM); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index e0cfde3501e0dc45ae6b222c8e458fbb7d9e953d..4609c5b481e23c35bbd261a01bc96e5c19fc1e2d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -828,7 +828,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro int r = -ENOMEM; struct dm_kcopyd_client *kc; - kc = kmalloc(sizeof(*kc), GFP_KERNEL); + kc = kzalloc(sizeof(*kc), GFP_KERNEL); if (!kc) return ERR_PTR(-ENOMEM); diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 85c32b22a420a67daee1aaf64ccfdd354092d33a..91c6f6d72eeec7e97233a7b391db33fe5bad28df 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -179,7 +179,7 @@ struct dm_region_hash *dm_region_hash_create( ; nr_buckets >>= 1; - rh = kmalloc(sizeof(*rh), GFP_KERNEL); + rh = kzalloc(sizeof(*rh), GFP_KERNEL); if (!rh) { DMERR("unable to allocate region hash memory"); return ERR_PTR(-ENOMEM); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 2da0b9b213c7260369b7deabf9744249ba53d1f4..c04d9f22d1607f52c5fbd6cc9c44a9e622b07ebf 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "dm.h" @@ -48,7 +47,7 @@ struct dm_exception_table { }; struct dm_snapshot { - struct rw_semaphore lock; + struct mutex lock; struct dm_dev *origin; struct dm_dev *cow; @@ -106,8 +105,8 @@ struct dm_snapshot { /* The on disk metadata handler */ struct dm_exception_store *store; - /* Maximum number of in-flight COW jobs. 
*/ - struct semaphore cow_count; + unsigned in_progress; + wait_queue_head_t in_progress_wait; struct dm_kcopyd_client *kcopyd_client; @@ -158,8 +157,8 @@ struct dm_snapshot { */ #define DEFAULT_COW_THRESHOLD 2048 -static int cow_threshold = DEFAULT_COW_THRESHOLD; -module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); +static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; +module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, @@ -456,9 +455,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) continue; - down_read(&s->lock); + mutex_lock(&s->lock); active = s->active; - up_read(&s->lock); + mutex_unlock(&s->lock); if (active) { if (snap_src) @@ -926,7 +925,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s) int r; chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; - down_write(&s->lock); + mutex_lock(&s->lock); /* * Process chunks (and associated exceptions) in reverse order @@ -941,7 +940,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s) b = __release_queued_bios_after_merge(s); out: - up_write(&s->lock); + mutex_unlock(&s->lock); if (b) flush_bios(b); @@ -1000,9 +999,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s) if (linear_chunks < 0) { DMERR("Read error in exception store: " "shutting down merge"); - down_write(&s->lock); + mutex_lock(&s->lock); s->merge_failed = 1; - up_write(&s->lock); + mutex_unlock(&s->lock); } goto shut; } @@ -1043,10 +1042,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s) previous_count = read_pending_exceptions_done_count(); } - down_write(&s->lock); + mutex_lock(&s->lock); s->first_merging_chunk = old_chunk; s->num_merging_chunks = linear_chunks; - up_write(&s->lock); + mutex_unlock(&s->lock); /* Wait until writes to all 'linear_chunks' drain */ for (i = 0; i < linear_chunks; i++) @@ -1088,10 +1087,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context) return; shut: - down_write(&s->lock); + mutex_lock(&s->lock); s->merge_failed = 1; b = __release_queued_bios_after_merge(s); - up_write(&s->lock); + mutex_unlock(&s->lock); error_bios(b); merge_shutdown(s); @@ -1137,7 +1136,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) origin_mode = FMODE_WRITE; } - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) { ti->error = "Cannot allocate private snapshot structure"; r = -ENOMEM; @@ -1190,7 +1189,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->exception_start_sequence = 0; s->exception_complete_sequence = 0; INIT_LIST_HEAD(&s->out_of_order_list); - init_rwsem(&s->lock); + mutex_init(&s->lock); INIT_LIST_HEAD(&s->list); spin_lock_init(&s->pe_lock); s->state_bits = 0; @@ -1206,7 +1205,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_hash_tables; } - sema_init(&s->cow_count, (cow_threshold > 0) ? 
cow_threshold : INT_MAX); + init_waitqueue_head(&s->in_progress_wait); s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(s->kcopyd_client)) { @@ -1357,9 +1356,9 @@ static void snapshot_dtr(struct dm_target *ti) /* Check whether exception handover must be cancelled */ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); if (snap_src && snap_dest && (s == snap_src)) { - down_write(&snap_dest->lock); + mutex_lock(&snap_dest->lock); snap_dest->valid = 0; - up_write(&snap_dest->lock); + mutex_unlock(&snap_dest->lock); DMERR("Cancelling snapshot handover."); } up_read(&_origins_lock); @@ -1390,13 +1389,62 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); + mutex_destroy(&s->lock); + dm_put_device(ti, s->cow); dm_put_device(ti, s->origin); + WARN_ON(s->in_progress); + kfree(s); } +static void account_start_copy(struct dm_snapshot *s) +{ + spin_lock(&s->in_progress_wait.lock); + s->in_progress++; + spin_unlock(&s->in_progress_wait.lock); +} + +static void account_end_copy(struct dm_snapshot *s) +{ + spin_lock(&s->in_progress_wait.lock); + BUG_ON(!s->in_progress); + s->in_progress--; + if (likely(s->in_progress <= cow_threshold) && + unlikely(waitqueue_active(&s->in_progress_wait))) + wake_up_locked(&s->in_progress_wait); + spin_unlock(&s->in_progress_wait.lock); +} + +static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) +{ + if (unlikely(s->in_progress > cow_threshold)) { + spin_lock(&s->in_progress_wait.lock); + if (likely(s->in_progress > cow_threshold)) { + /* + * NOTE: this throttle doesn't account for whether + * the caller is servicing an IO that will trigger a COW + * so excess throttling may result for chunks not required + * to be COW'd. But if cow_threshold was reached, extra + * throttling is unlikely to negatively impact performance. + */ + DECLARE_WAITQUEUE(wait, current); + __add_wait_queue(&s->in_progress_wait, &wait); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&s->in_progress_wait.lock); + if (unlock_origins) + up_read(&_origins_lock); + io_schedule(); + remove_wait_queue(&s->in_progress_wait, &wait); + return false; + } + spin_unlock(&s->in_progress_wait.lock); + } + return true; +} + /* * Flush a list of buffers. */ @@ -1412,7 +1460,7 @@ static void flush_bios(struct bio *bio) } } -static int do_origin(struct dm_dev *origin, struct bio *bio); +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); /* * Flush a list of buffers. 
@@ -1425,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - r = do_origin(s->origin, bio); + r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) generic_make_request(bio); bio = n; @@ -1477,7 +1525,7 @@ static void pending_complete(void *context, int success) if (!success) { /* Read/write error - snapshot is unusable */ - down_write(&s->lock); + mutex_lock(&s->lock); __invalidate_snapshot(s, -EIO); error = 1; goto out; @@ -1485,14 +1533,14 @@ static void pending_complete(void *context, int success) e = alloc_completed_exception(GFP_NOIO); if (!e) { - down_write(&s->lock); + mutex_lock(&s->lock); __invalidate_snapshot(s, -ENOMEM); error = 1; goto out; } *e = pe->e; - down_write(&s->lock); + mutex_lock(&s->lock); if (!s->valid) { free_completed_exception(e); error = 1; @@ -1517,7 +1565,7 @@ static void pending_complete(void *context, int success) full_bio->bi_end_io = pe->full_bio_end_io; increment_pending_exceptions_done_count(); - up_write(&s->lock); + mutex_unlock(&s->lock); /* Submit any pending write bios */ if (error) { @@ -1579,7 +1627,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) } list_add(&pe->out_of_order_entry, lh); } - up(&s->cow_count); + account_end_copy(s); } /* @@ -1603,7 +1651,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ - down(&s->cow_count); + account_start_copy(s); dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } @@ -1623,7 +1671,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; - down(&s->cow_count); + account_start_copy(s); callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); @@ -1714,9 +1762,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (!s->valid) return -EIO; - /* FIXME: should only take write lock if we need - * to copy an exception */ - down_write(&s->lock); + if (bio_data_dir(bio) == WRITE) { + while (unlikely(!wait_for_in_progress(s, false))) + ; /* wait_for_in_progress() has slept */ + } + + mutex_lock(&s->lock); if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_data_dir(bio) == WRITE)) { @@ -1739,9 +1790,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (bio_data_dir(bio) == WRITE) { pe = __lookup_pending_exception(s, chunk); if (!pe) { - up_write(&s->lock); + mutex_unlock(&s->lock); pe = alloc_pending_exception(s); - down_write(&s->lock); + mutex_lock(&s->lock); if (!s->valid || s->snapshot_overflowed) { free_pending_exception(pe); @@ -1776,7 +1827,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) bio->bi_iter.bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { pe->started = 1; - up_write(&s->lock); + mutex_unlock(&s->lock); start_full_bio(pe, bio); goto out; } @@ -1786,7 +1837,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (!pe->started) { /* this is protected by snap->lock */ pe->started = 1; - up_write(&s->lock); + mutex_unlock(&s->lock); start_copy(pe); goto out; } @@ -1796,7 +1847,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) } out_unlock: - up_write(&s->lock); + mutex_unlock(&s->lock); out: return r; } @@ -1832,7 +1883,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); - down_write(&s->lock); + mutex_lock(&s->lock); /* 
Full merging snapshots are redirected to the origin */ if (!s->valid) @@ -1863,12 +1914,12 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) bio->bi_bdev = s->origin->bdev; if (bio_data_dir(bio) == WRITE) { - up_write(&s->lock); - return do_origin(s->origin, bio); + mutex_unlock(&s->lock); + return do_origin(s->origin, bio, false); } out_unlock: - up_write(&s->lock); + mutex_unlock(&s->lock); return r; } @@ -1899,7 +1950,7 @@ static int snapshot_preresume(struct dm_target *ti) down_read(&_origins_lock); (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); if (snap_src && snap_dest) { - down_read(&snap_src->lock); + mutex_lock(&snap_src->lock); if (s == snap_src) { DMERR("Unable to resume snapshot source until " "handover completes."); @@ -1909,7 +1960,7 @@ static int snapshot_preresume(struct dm_target *ti) "source is suspended."); r = -EINVAL; } - up_read(&snap_src->lock); + mutex_unlock(&snap_src->lock); } up_read(&_origins_lock); @@ -1955,11 +2006,11 @@ static void snapshot_resume(struct dm_target *ti) (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); if (snap_src && snap_dest) { - down_write(&snap_src->lock); - down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); + mutex_lock(&snap_src->lock); + mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); __handover_exceptions(snap_src, snap_dest); - up_write(&snap_dest->lock); - up_write(&snap_src->lock); + mutex_unlock(&snap_dest->lock); + mutex_unlock(&snap_src->lock); } up_read(&_origins_lock); @@ -1974,9 +2025,9 @@ static void snapshot_resume(struct dm_target *ti) /* Now we have correct chunk size, reregister */ reregister_snapshot(s); - down_write(&s->lock); + mutex_lock(&s->lock); s->active = 1; - up_write(&s->lock); + mutex_unlock(&s->lock); } static uint32_t get_origin_minimum_chunksize(struct block_device *bdev) @@ -2016,7 +2067,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: - down_write(&snap->lock); + mutex_lock(&snap->lock); if (!snap->valid) DMEMIT("Invalid"); @@ -2041,7 +2092,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type, DMEMIT("Unknown"); } - up_write(&snap->lock); + mutex_unlock(&snap->lock); break; @@ -2107,7 +2158,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, if (dm_target_is_snapshot_merge(snap->ti)) continue; - down_write(&snap->lock); + mutex_lock(&snap->lock); /* Only deal with valid and active snapshots */ if (!snap->valid || !snap->active) @@ -2134,9 +2185,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, pe = __lookup_pending_exception(snap, chunk); if (!pe) { - up_write(&snap->lock); + mutex_unlock(&snap->lock); pe = alloc_pending_exception(snap); - down_write(&snap->lock); + mutex_lock(&snap->lock); if (!snap->valid) { free_pending_exception(pe); @@ -2179,7 +2230,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, } next_snapshot: - up_write(&snap->lock); + mutex_unlock(&snap->lock); if (pe_to_start_now) { start_copy(pe_to_start_now); @@ -2200,15 +2251,24 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, /* * Called on a write from the origin driver. 
*/ -static int do_origin(struct dm_dev *origin, struct bio *bio) +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) { struct origin *o; int r = DM_MAPIO_REMAPPED; +again: down_read(&_origins_lock); o = __lookup_origin(origin->bdev); - if (o) + if (o) { + if (limit) { + struct dm_snapshot *s; + list_for_each_entry(s, &o->snapshots, list) + if (unlikely(!wait_for_in_progress(s, true))) + goto again; + } + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); + } up_read(&_origins_lock); return r; @@ -2321,7 +2381,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, available_sectors); /* Only tell snapshots if this is a write */ - return do_origin(o->dev, bio); + return do_origin(o->dev, bio, true); } static long origin_direct_access(struct dm_target *ti, sector_t sector, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index b8a935ade31664c4255213c6354b51e8ca5dfc83..ccb08a093d229684e35e896f0671b1b5307dc01d 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2965,7 +2965,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, return (struct pool *)pmd; } - pool = kmalloc(sizeof(*pool), GFP_KERNEL); + pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) { *error = "Error allocating memory for pool"; err_p = ERR_PTR(-ENOMEM); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b6c9f69d1ad05b3ee2e3d9d6784ce54dd3cae813..a2d2ac239c08175f83c31628d24d274b109bd6a8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1953,9 +1953,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); - spin_lock_irq(q->queue_lock); - queue_flag_set(QUEUE_FLAG_DYING, q); - spin_unlock_irq(q->queue_lock); + blk_set_queue_dying(q); if (dm_request_based(md) && md->kworker_task) kthread_flush_worker(&md->kworker); diff --git a/drivers/md/md.c b/drivers/md/md.c index bb65392c9aa52a8083386ee10ad49a74aa47b356..1a7b3f088fd47e8babb5e06d219e147f6ef90fc0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1662,8 +1662,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_BITMAP)) rdev->saved_raid_disk = -1; - } else - set_bit(In_sync, &rdev->flags); + } else { + /* + * If the array is FROZEN, then the device can't + * be in_sync with rest of array. 
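The dm-snapshot hunks above swap the old cow_count semaphore for account_start_copy()/account_end_copy() and gate origin writers with wait_for_in_progress(), so a congested COW device throttles writers without sleeping under s->lock. The helper bodies are not part of these hunks; the sketch below is only a minimal illustration of the bounded in-flight counter they imply, with assumed field and constant names, not the actual dm-snapshot code.

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Assumed limit on outstanding COW chunks; the real threshold differs. */
#define COW_MAX_IN_FLIGHT	2048

struct cow_throttle {
	spinlock_t lock;		/* init: spin_lock_init() */
	unsigned int in_progress;	/* chunks currently being copied */
	wait_queue_head_t wait;		/* init: init_waitqueue_head() */
};

static void cow_account_start(struct cow_throttle *t)
{
	spin_lock(&t->lock);
	t->in_progress++;
	spin_unlock(&t->lock);
}

static void cow_account_end(struct cow_throttle *t)
{
	spin_lock(&t->lock);
	if (--t->in_progress < COW_MAX_IN_FLIGHT)
		wake_up(&t->wait);
	spin_unlock(&t->lock);
}

/*
 * Returns false if it had to sleep, mirroring wait_for_in_progress():
 * the caller must revalidate its state (e.g. re-take _origins_lock in
 * do_origin()) before retrying.
 */
static bool cow_wait_for_room(struct cow_throttle *t)
{
	if (READ_ONCE(t->in_progress) < COW_MAX_IN_FLIGHT)
		return true;
	wait_event(t->wait, READ_ONCE(t->in_progress) < COW_MAX_IN_FLIGHT);
	return false;
}

Under this scheme a WRITE path would loop on cow_wait_for_room() before taking the snapshot lock, exactly as the new snapshot_map() and do_origin() code above loops on wait_for_in_progress().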
+ */ + if (!test_bit(MD_RECOVERY_FROZEN, + &mddev->recovery)) + set_bit(In_sync, &rdev->flags); + } rdev->raid_disk = role; break; } @@ -8573,7 +8580,8 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && + mddev->degraded != mddev->raid_disks) { /* success...*/ /* activate any spares */ if (mddev->pers->spare_active(mddev)) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2bed726d64bf52aed9e3a338c2e404877a4190f..0fc99dd6fb361b831a0c938bf7a79e5357cab7ff 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2960,6 +2960,13 @@ static int raid1_run(struct mddev *mddev) !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || test_bit(Faulty, &conf->mirrors[i].rdev->flags)) mddev->degraded++; + /* + * RAID1 needs at least one disk in active + */ + if (conf->raid_disks - mddev->degraded < 1) { + ret = -EINVAL; + goto abort; + } if (conf->raid_disks - mddev->degraded == 1) mddev->recovery_cp = MaxSector; @@ -2994,8 +3001,12 @@ static int raid1_run(struct mddev *mddev) ret = md_integrity_register(mddev); if (ret) { md_unregister_thread(&mddev->thread); - raid1_free(mddev, conf); + goto abort; } + return 0; + +abort: + raid1_free(mddev, conf); return ret; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7196bc3cadcdb02fc67c506a865a5fb8a0ee8c6d..0c926c7258281fea9ffceb1295c1b9a73bbf48ca 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2416,7 +2416,9 @@ static void raid5_end_read_request(struct bio * bi) && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) retry = 1; if (retry) - if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + if (sh->qd_idx >= 0 && sh->pd_idx == i) + set_bit(R5_ReadError, &sh->dev[i].flags); + else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); } else diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 75a3f4b57fd4fb7dde81ebbcc54ba49f2ea42994..a1cc1c1e5318299bf1d82f7e931f6ee7590d9961 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -314,8 +314,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, if (npads) { dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads), GFP_KERNEL); - if (!dvbdev->pads) + if (!dvbdev->pads) { + kfree(dvbdev->entity); return -ENOMEM; + } } switch (type) { diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 502c72238a4a52440a2fa735686f88ffa00a88bf..db962e2108adfdb5bc5b1bf70586823f1236c0c6 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -708,6 +708,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain) for (m = 6; m >= 0; m--) if (gain >= (1 << m) * 16) break; + + /* Sanity check: don't adjust the gain with a negative value */ + if (m < 0) + return -EINVAL; + rgain = (gain - ((1 << m) * 16)) / (1 << m); rgain |= (((1 << m) - 1) << 4); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index e83f8111a5fb622ad2ed7654914579a5dfaa21b6..b27362ae4a5e0182e6c0e79a72da24b9514f9ee9 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -824,7 +824,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl) return 0; case V4L2_CID_HUE: tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val); - break; + return 0; case 
V4L2_CID_TEST_PATTERN: decoder->enable = ctrl->val ? false : true; tvp5150_selmux(sd); diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index f7299d3d82449ddaefc1ee76fd3f8cb33650e5b4..bcb51e87c72f02211c9de862809b8e740f7a6091 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -935,7 +935,7 @@ static void ivtv_yuv_init(struct ivtv *itv) } /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */ - yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL|__GFP_NOWARN); + yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN); if (yi->blanking_ptr) { yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE); } else { diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index ba887e8e1b171110f2bac5716fc8a24f38f69230..a85c5199ccd30e228a082ab34541e066e684b0f1 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1469,7 +1469,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma) unsigned long page, pos; mutex_lock(&meye.lock); - if (size > gbuffers * gbufsize) { + if (size > gbuffers * gbufsize || offset > gbuffers * gbufsize - size) { mutex_unlock(&meye.lock); return -EINVAL; } diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index dca0592c5f47157bae6d1b85a751bb51f7aee6fe..6f93568f56204e403103e556990a2f28836f633f 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -355,7 +355,11 @@ static struct i2c_client saa7134_client_template = { /* ----------------------------------------------------------- */ -/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ +/* + * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T + * demod i2c gate closed due to an address clash between this EEPROM + * and the demod one. + */ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) { u8 subaddr = 0x7, dmdregval; @@ -372,14 +376,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); if ((ret == 2) && (dmdregval & 0x2)) { - pr_debug("%s: DVB-T demod i2c gate was left closed\n", + pr_debug("%s: DVB-T demod i2c gate was left open\n", dev->name); data[0] = subaddr; data[1] = (dmdregval & ~0x2); if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) - pr_err("%s: EEPROM i2c gate open failure\n", - dev->name); + pr_err("%s: EEPROM i2c gate close failure\n", + dev->name); } } diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index f5fc8bcbd14b1d771108511eaf5b3bcc402c4dd5..be85a2c4318e7fb372664a9cb010a56548b80ec2 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -304,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER); if (ret < 0) { pr_err("cannot register capture v4l2 device. 
skipping.\n"); + saa7146_vv_release(dev); + i2c_del_adapter(&hexium->i2c_adapter); + kfree(hexium); return ret; } diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index 99faea2e84c6b38d78ae9ee5b6337e765f2daf9f..78e37cf3470f2c059a2edbbfc2f7ae96d8b2f852 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -1106,7 +1106,8 @@ static int isif_probe(struct platform_device *pdev) while (i >= 0) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); - release_mem_region(res->start, resource_size(res)); + if (res) + release_mem_region(res->start, resource_size(res)); i--; } vpfe_unregister_ccdc_device(&isif_hw_dev); diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index a9bc0175e4d3df2a89f6b768834af9ceb3c8d670..c839003953a74133be9a559bbc789f1119da36b7 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -518,7 +518,7 @@ vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev, else if (v_scale == 4) layer_info->v_zoom = ZOOM_X4; if (v_exp) - layer_info->h_exp = V_EXP_6_OVER_5; + layer_info->v_exp = V_EXP_6_OVER_5; } else { /* no scaling, only cropping. Set display area to crop area */ cfg->ysize = expected_ysize; diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 7f92144a1de3acd812dde54c70b32bd901a8959c..f9456f26ff4facec2eeb7081c029d544ee019201 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev) return -ENODEV; is->pmu_regs = of_iomap(node, 0); + of_node_put(node); if (!is->pmu_regs) return -ENOMEM; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 1a1154a9dfa492e423ce220df6350fcd7689fa96..ef6ccb5b89525025372bb9b6893f231a9df0e69b 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -494,6 +494,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) continue; ret = fimc_md_parse_port_node(fmd, port, index); + of_node_put(port); if (ret < 0) { of_node_put(node); goto rpm_put; @@ -527,6 +528,7 @@ static int __of_get_csis_id(struct device_node *np) if (!np) return -EINVAL; of_property_read_u32(np, "reg", ®); + of_node_put(np); return reg - FIMC_INPUT_MIPI_CSI2_0; } diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index d8d79853a7c5f699030b5e94a782101929f9fbef..4b518462019bfeae33d189da85ef1a75dba9b2d9 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -1850,34 +1850,6 @@ static int venus_hfi_core_release(void *dev) return rc; } -static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index) -{ - struct hfi_queue_header *queue; - struct vidc_iface_q_info *q_info; - u32 write_ptr, read_ptr; - - if (q_index >= VIDC_IFACEQ_NUMQ) { - dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index); - return -ENOENT; - } - - q_info = &dev->iface_queues[q_index]; - if (!q_info) { - dprintk(VIDC_ERR, "cannot read shared Q's\n"); - return -ENOENT; - } - - queue = (struct hfi_queue_header *)q_info->q_hdr; - if (!queue) { - dprintk(VIDC_ERR, "queue not present\n"); - return -ENOENT; - } - - write_ptr = (u32)queue->qhdr_write_idx; - read_ptr = (u32)queue->qhdr_read_idx; - 
return read_ptr - write_ptr; -} - static void __core_clear_interrupt(struct venus_hfi_device *device) { u32 intr_status = 0; @@ -3206,8 +3178,7 @@ static int __response_handler(struct venus_hfi_device *device) *session_id = session->session_id; } - if (packet_count >= max_packets && - __get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) { + if (packet_count >= max_packets) { dprintk(VIDC_WARN, "Too many packets in message queue to handle at once, deferring read\n"); break; diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c index 2d0e3531f3e78deaa6433ee800478631fb31bc65..550e06029ece297b9c0986c5006a904c7d8aac94 100644 --- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c +++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c @@ -2289,34 +2289,6 @@ static int venus_hfi_core_release(void *dev) return rc; } -static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index) -{ - struct hfi_queue_header *queue; - struct vidc_iface_q_info *q_info; - u32 write_ptr, read_ptr; - - if (q_index >= VIDC_IFACEQ_NUMQ) { - dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index); - return -ENOENT; - } - - q_info = &dev->iface_queues[q_index]; - if (!q_info) { - dprintk(VIDC_ERR, "cannot read shared Q's\n"); - return -ENOENT; - } - - queue = (struct hfi_queue_header *)q_info->q_hdr; - if (!queue) { - dprintk(VIDC_ERR, "queue not present\n"); - return -ENOENT; - } - - write_ptr = (u32)queue->qhdr_write_idx; - read_ptr = (u32)queue->qhdr_read_idx; - return read_ptr - write_ptr; -} - static void __core_clear_interrupt(struct venus_hfi_device *device) { u32 intr_status = 0; @@ -3605,8 +3577,7 @@ static int __response_handler(struct venus_hfi_device *device) *session_id = session->session_id; } - if (packet_count >= max_packets && - __get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) { + if (packet_count >= max_packets) { dprintk(VIDC_WARN, "Too many packets in message queue to handle at once, deferring read\n"); break; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index a21b12c5c08539cd693a48d039b344cb77b4a40a..ce651d3ca1b8288344ad8dc2a15d5ba4556bfa34 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -726,6 +726,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe, s_stream, mode); pipe->do_propagation = true; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return 0; @@ -840,6 +844,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) &subdev->entity); failure = -ETIMEDOUT; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return failure; diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 882310eb45ccfa51e309769058a21433aa485655..fe16fbd95221ff70db402220276100616796f5db 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -2608,6 +2608,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc, int ret; /* Register the subdev and video node. 
*/ + ccdc->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccdc->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c index ca095238510d5774109e114eb795c441be763dbf..b64e218eaea6e256ad66b33f12d7bcf423e23377 100644 --- a/drivers/media/platform/omap3isp/ispccp2.c +++ b/drivers/media/platform/omap3isp/ispccp2.c @@ -1030,6 +1030,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2, int ret; /* Register the subdev and video nodes. */ + ccp2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccp2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index f75a1be29d84aa1f8ff63f2a60f557297369989d..27a2913363b62dfc72415df2cc7ccafd3f9fba72 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -1206,6 +1206,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2, int ret; /* Register the subdev and video nodes. */ + csi2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &csi2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index ac30a0f837801ae57d3227429e4492a23306a0a9..e981eb2330f1813b9a00f6867872630a2180da1b 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev, int ret; /* Register the subdev and video nodes. */ + prev->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &prev->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c index 0b6a87508584f4eb5201f3f2b0bf284c1b7380f7..2035e3c6a9deeeb5660d25ae690b4719ac9e4ebb 100644 --- a/drivers/media/platform/omap3isp/ispresizer.c +++ b/drivers/media/platform/omap3isp/ispresizer.c @@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res, int ret; /* Register the subdev and video nodes. 
*/ + res->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &res->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c index 1b9217d3b1b6af3b01a2979cc9a7cea28c027718..4a4ae637655ba1a54b435b35761641c3307fb3e0 100644 --- a/drivers/media/platform/omap3isp/ispstat.c +++ b/drivers/media/platform/omap3isp/ispstat.c @@ -1010,6 +1010,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat) int omap3isp_stat_register_entities(struct ispstat *stat, struct v4l2_device *vdev) { + stat->subdev.dev = vdev->mdev->dev; + return v4l2_device_register_subdev(vdev, &stat->subdev); } diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 390d708c807a0e1ad60acea862c146f45daeb0b8..3fab9f776afa702ab51532eecc098e7b8beef37b 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -2334,7 +2334,7 @@ static int pxa_camera_probe(struct platform_device *pdev) pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; - if (&pdev->dev.of_node && !pcdev->pdata) { + if (pdev->dev.of_node && !pcdev->pdata) { err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); } else { pcdev->platform_flags = pcdev->pdata->flags; diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index d300e5e7eadc87f90d62de93cf46d1420be26b40..2ca9c928ed2f201095e7fd2e7fd00ac3d95af0c1 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -777,7 +777,11 @@ static int vivid_thread_vid_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->cap_seq_resync) { dev->jiffies_vid_cap = cur_jiffies; @@ -930,8 +934,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_cap); dev->kthread_vid_cap = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index 7c8d758528161d82013c60cffa0fa98c92b6cb07..ed5d8fb854b46459630554bbc88237cfb5a313bd 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -147,7 +147,11 @@ static int vivid_thread_vid_out(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if (!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->out_seq_resync) { dev->jiffies_vid_out = cur_jiffies; @@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) /* shutdown control thread */ vivid_grab_controls(dev, false); - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_vid_out); dev->kthread_vid_out = NULL; - mutex_lock(&dev->mutex); } diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index ebd7b9c4dd830ec3c78a88f90abbb5a84ce55926..4f49c9a47d49e6f2ab3aebcb7a50ce787eadeaf4 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c @@ -149,7 +149,11 @@ static int vivid_thread_sdr_cap(void *data) if (kthread_should_stop()) break; - mutex_lock(&dev->mutex); + if 
(!mutex_trylock(&dev->mutex)) { + schedule_timeout_uninterruptible(1); + continue; + } + cur_jiffies = jiffies; if (dev->sdr_cap_seq_resync) { dev->jiffies_sdr_cap = cur_jiffies; @@ -309,10 +313,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) } /* shutdown control thread */ - mutex_unlock(&dev->mutex); kthread_stop(dev->kthread_sdr_cap); dev->kthread_sdr_cap = NULL; - mutex_lock(&dev->mutex); } const struct vb2_ops vivid_sdr_cap_qops = { diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index a72982df4777cbb21b19679825ea31510e720ada..82621260fc34b373c46dd9cb0c5ff937a9dac17f 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -236,9 +236,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_out_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_cap) - return 0; - dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); for (i = 0; i < VIDEO_MAX_FRAME; i++) diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index dd609eea4753cc6d8117787939b69b1c20a7d77d..8fed2fbe91a98e48e9cd03c641b861b56e9d73e0 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -158,9 +158,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) if (vb2_is_streaming(&dev->vb_vid_cap_q)) dev->can_loop_video = vivid_vid_can_loop(dev); - if (dev->kthread_vid_out) - return 0; - dev->vid_out_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); if (dev->start_streaming_error) { diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 4b132c29f2900c941490e02aa3d28275e0790db2..1d045a8c29e2156a92a210b9bbd6c16cd057b551 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -742,7 +742,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, /* start radio */ retval = si470x_start_usb(radio); if (retval < 0) - goto err_all; + goto err_buf; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ @@ -757,6 +757,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, return 0; err_all: + usb_kill_urb(radio->int_in_urb); +err_buf: kfree(radio->buffer); err_ctrl: v4l2_ctrl_handler_free(&radio->hdl); @@ -830,6 +832,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf) mutex_lock(&radio->lock); v4l2_device_disconnect(&radio->v4l2_dev); video_unregister_device(&radio->videodev); + usb_kill_urb(radio->int_in_urb); usb_set_intfdata(intf, NULL); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index 5f634545ddd81c6e98887ed93a8fc2abb8a29f21..25470395c43f1c838d7b28a1e739894d9fd84c61 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -430,6 +430,10 @@ static int iguanair_probe(struct usb_interface *intf, int ret, pipein, pipeout; struct usb_host_interface *idesc; + idesc = intf->altsetting; + if (idesc->desc.bNumEndpoints < 2) + return -ENODEV; + ir = kzalloc(sizeof(*ir), GFP_KERNEL); rc = rc_allocate_device(); if (!ir || !rc) { @@ -444,18 +448,13 @@ static int iguanair_probe(struct usb_interface *intf, ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ir->urb_out = usb_alloc_urb(0, GFP_KERNEL); - if 
(!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) { + if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out || + !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) || + !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) { ret = -ENOMEM; goto out; } - idesc = intf->altsetting; - - if (idesc->desc.bNumEndpoints < 2) { - ret = -ENODEV; - goto out; - } - ir->rc = rc; ir->dev = &intf->dev; ir->udev = udev; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index f072bf28fccdd4cb71c7487de26e4c3d07ec8b0e..0b386fd518cc01f87caa89da5aad36683e748ee7 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1644,8 +1644,7 @@ static void imon_incoming_packet(struct imon_context *ictx, spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ - if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && - buf[7] == 0x86) { + if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index 38e73ee5c8fb1b8de696c78bb4feb07ccedc3341..78f0bf8ee084c1be71e96fbc46be888c59e27caf 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -639,7 +639,7 @@ static int au0828_usb_probe(struct usb_interface *interface, /* Analog TV */ retval = au0828_analog_register(dev, interface); if (retval) { - pr_err("%s() au0282_dev_register failed to register on V4L2\n", + pr_err("%s() au0828_analog_register failed to register on V4L2\n", __func__); goto done; } @@ -647,7 +647,7 @@ static int au0828_usb_probe(struct usb_interface *interface, /* Digital TV */ retval = au0828_dvb_register(dev); if (retval) - pr_err("%s() au0282_dev_register failed\n", + pr_err("%s() au0828_dvb_register failed\n", __func__); /* Remote controller */ diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 52bc42da8a4ce4dde4848c3a2497c5b286b6ae8b..1fc3c8d7dd9b8c73efb0967529a1aeb0bc0ee2ae 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf, struct flexcop_device *fc = NULL; int ret; + if (intf->cur_altsetting->desc.bNumEndpoints < 1) + return -ENODEV; + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { err("out of memory\n"); return -ENOMEM; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 21e5454d260a0182a211d8b4fabfef123d028405..30e27844e0e999c51cb8b19a58e8143f73eb126e 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -690,6 +690,10 @@ static int submit_urbs(struct camera_data *cam) if (!urb) { for (j = 0; j < i; j++) usb_free_urb(cam->sbuf[j].urb); + for (j = 0; j < NUM_SBUF; j++) { + kfree(cam->sbuf[j].data); + cam->sbuf[j].data = NULL; + } return -ENOMEM; } diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 6414188ffdfac89d807fa885e707688ec9532f07..cd973e780da936023422e6cb20d3dcfbb8c9842c 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -1389,7 +1389,7 @@ int cx231xx_g_register(struct file *file, void *priv, ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, (u16)reg->reg, value, 4); reg->val = value[0] | value[1] << 8 | - value[2] << 16 | value[3] << 24; + value[2] << 16 | (u32)value[3] << 24; reg->size = 4; 
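		/*
		 * Illustration only, not part of the patch: without the
		 * (u32) cast above, value[3] is promoted to a signed int,
		 * so value[3] << 24 with value[3] >= 0x80 shifts into the
		 * sign bit and the result is sign-extended when stored in
		 * the wider reg->val, e.g.
		 *
		 *	u8  b = 0xab;
		 *	u64 v = b << 24;        // 0xffffffffab000000 on common ABIs
		 *	u64 w = (u32)b << 24;   // 0x00000000ab000000
		 *
		 * Casting the byte to u32 keeps the OR of all four bytes
		 * well defined and the reported register value correct.
		 */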
break; case 1: /* AFE - read byte */ diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index b20f03d86e001efa78f8dcd617fb39dd8818c89d..2b7a1b569db0b79e39e51f84e004f2c70024881a 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -437,7 +437,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) u8 ircode[4]; int i; - cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); + if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) + return 0; *event = 0; *state = REMOTE_NO_KEY_PRESSED; diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 2868766893c8535d038c8be72e1187cfe244b2e6..c7c8fea0f1fa1cff2242ed005a224e53d3d3815a 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -2438,9 +2438,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap) 8, 0x0486, }; + if (!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); @@ -2516,10 +2520,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap) 0, 0x00ef, 8, 0x0406, }; + if (!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0) return -ENODEV; diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 4706628a3ed5ea5345e30a9dc59cad7fb385286d..10bccce22858a93cbeab296e988cb2414c66fab2 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -612,10 +612,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) static int technisat_usb2_get_ir(struct dvb_usb_device *d) { struct technisat_usb2_state *state = d->priv; - u8 *buf = state->buf; - u8 *b; - int ret; struct ir_raw_event ev; + u8 *buf = state->buf; + int i, ret; buf[0] = GET_IR_DATA_VENDOR_REQUEST; buf[1] = 0x08; @@ -651,26 +650,25 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d) return 0; /* no key pressed */ /* decoding */ - b = buf+1; #if 0 deb_rc("RC: %d ", ret); - debug_dump(b, ret, deb_rc); + debug_dump(buf + 1, ret, deb_rc); #endif ev.pulse = 0; - while (1) { - ev.pulse = !ev.pulse; - ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; - ir_raw_event_store(d->rc_dev, &ev); - - b++; - if (*b == 0xff) { + for (i = 1; i < ARRAY_SIZE(state->buf); i++) { + if (buf[i] == 0xff) { ev.pulse = 0; ev.duration = 888888*2; ir_raw_event_store(d->rc_dev, &ev); break; } + + ev.pulse = !ev.pulse; + ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR * + FIRMWARE_CLOCK_TICK) / 1000; + ir_raw_event_store(d->rc_dev, &ev); } ir_raw_event_handle(d->rc_dev); diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c index 78542fff403fcd13792d1a9f5fd135f695064101..5a37d32e8fd091fffecd67899be112b694287859 100644 --- a/drivers/media/usb/gspca/konica.c +++ 
b/drivers/media/usb/gspca/konica.c @@ -127,6 +127,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, 2); } } diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c index 599f755e75b86513c28228468bc19e15278a414a..7ebeee98dc1bb679db4380cbac02df91a751c0e7 100644 --- a/drivers/media/usb/gspca/nw80x.c +++ b/drivers/media/usb/gspca/nw80x.c @@ -1584,6 +1584,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); return; } if (len == 1) diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index 965372a5ff2f3821e1bdbdaba79ee7da17895212..7ac38905080addbabfe634840165a78ac64c759d 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -2087,6 +2087,11 @@ static int reg_r(struct sd *sd, u16 index) } else { PERR("reg_r %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return ret; @@ -2115,6 +2120,11 @@ static int reg_r8(struct sd *sd, } else { PERR("reg_r8 %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, 8); } return ret; diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 9266a5c9abc5db0197b7305530e06dfbbc9dd541..ba289b4530772ab1194633b3e20992d0c639ab82 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -645,6 +645,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("read failed %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c index 47085cf2d72365b5fdac410d5394bbdb8a7407b0..f2dca06069355c0f55bb243bffb1e6a40dbf836b 100644 --- a/drivers/media/usb/gspca/ov534_9.c +++ b/drivers/media/usb/gspca/ov534_9.c @@ -1157,6 +1157,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + return 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c index 5102cea504710c5c2aeeba1b0101159f35b0b459..6adbb0eca71fe78bb1aeb48b22f7bd0a60f993fa 100644 --- a/drivers/media/usb/gspca/se401.c +++ b/drivers/media/usb/gspca/se401.c @@ -115,6 +115,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent) pr_err("read req failed req %#04x error %d\n", req, err); gspca_dev->usb_err = err; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE); } } diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index 10269dad9d201adac6b50e5d9aac7238537e315f..11c794aea045998bbab40e6916cf4c74306578e3 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -137,6 +137,13 @@ static const struct dmi_system_id flip_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0341") } }, + { + .ident = "MSI MS-1039", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"), + } + }, { .ident = "MSI MS-1632", .matches = { @@ -923,6 +930,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) if (unlikely(result < 0 || result != length)) { pr_err("Read register %02x failed %d\n", reg, result); gspca_dev->usb_err = result; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 6696b2ec34e969dd631a91ca2dbe6b3ac5fac1fd..83e98b85ab6a1d3f65fe7d44ae91540af065adba 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -466,6 +466,11 @@ static void reg_r(struct gspca_dev *gspca_dev, dev_err(gspca_dev->v4l2_dev.dev, "Error reading register %02x: %d\n", value, res); gspca_dev->usb_err = res; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } } diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index d49d76ec142126c84868dcfd348aaf835ecaaee8..9ec63f75b8ea4dccb41c25e9e28defaa42c599d0 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -1174,6 +1174,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c index f38fd8949609fd917cee8568a4cba9bdb5e83cb4..ee93bd443df5d1411514b79681fd84b0d05dc993 100644 --- a/drivers/media/usb/gspca/spca1528.c +++ b/drivers/media/usb/gspca/spca1528.c @@ -84,6 +84,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c index e274cf19a3ea22a1448f340f7a779c5a1274d528..b236e9dcd46854f4f91147a3b03dbf4793c2946c 100644 --- a/drivers/media/usb/gspca/sq930x.c +++ b/drivers/media/usb/gspca/sq930x.c @@ -438,6 +438,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r %04x failed %d\n", value, ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index 46c9f2229a18675c6a1ca39528dab2196937c0c5..cc3e1478c5a09b04e1b1f724a381a48844cc5ded 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -268,6 +268,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c index b4efb2fb36fa3b875e96e5a668d2f349289e8bad..5032b9d7d9bb2a8532efc99712e26b09d315873b 100644 --- a/drivers/media/usb/gspca/vc032x.c +++ b/drivers/media/usb/gspca/vc032x.c @@ -2919,6 +2919,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } static void reg_r(struct gspca_dev *gspca_dev, diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index 896f1b2b91793907a62d751f178c30001da683bc..948aaae4d47eb24ea9b1489b5544038c35d8b5c8 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -147,6 +147,11 @@ static int w9968cf_read_sb(struct sd *sd) } else { pr_err("Read SB reg [01] failed\n"); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(sd->gspca_dev.usb_buf, 0, 2); } udelay(W9968CF_I2C_BUS_DELAY); diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index a20b60ac66ca46e556592c17fab009cfe1aa3824..99171b912a2d8bfd64bcbcdf8e2da8008fb50935 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -143,6 +143,7 @@ static int device_authorization(struct hdpvr_device *dev) dev->fw_ver = dev->usbc_buf[1]; + dev->usbc_buf[46] = '\0'; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", dev->fw_ver, &dev->usbc_buf[2]); @@ -278,6 +279,7 @@ static int hdpvr_probe(struct usb_interface *interface, #endif size_t buffer_size; int i; + int dev_num; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ @@ -382,8 +384,17 @@ static int hdpvr_probe(struct usb_interface *interface, } #endif + dev_num = atomic_inc_return(&dev_nr); + if (dev_num >= HDPVR_MAX) { + v4l2_err(&dev->v4l2_dev, + "max device number reached, device register failed\n"); + atomic_dec(&dev_nr); + retval = -ENODEV; + goto reg_fail; + } + retval = hdpvr_register_videodev(dev, &interface->dev, - video_nr[atomic_inc_return(&dev_nr)]); + video_nr[dev_num]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto reg_fail; diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 1c48f2f1e14aa55ada2e381d10025777a7882f81..7297fd261df947044cb443b8ab626c4ed8c2f774 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -647,8 +647,7 @@ static int v4l_stk_release(struct file *fp) dev->owner = NULL; } - if (is_present(dev)) - usb_autopm_put_interface(dev->interface); + usb_autopm_put_interface(dev->interface); mutex_unlock(&dev->lock); return v4l2_fh_release(fp); } diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c 
b/drivers/media/usb/tm6000/tm6000-dvb.c index ee88ae83230c26ca7df038e288ad800d47e45582..185c8079d0f933805dc940cdc902359569d0d69d 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -111,6 +111,7 @@ static void tm6000_urb_received(struct urb *urb) printk(KERN_ERR "tm6000: error %s\n", __func__); kfree(urb->transfer_buffer); usb_free_urb(urb); + dev->dvb->bulk_urb = NULL; } } } @@ -141,6 +142,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL); if (dvb->bulk_urb->transfer_buffer == NULL) { usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n"); return -ENOMEM; } @@ -168,6 +170,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) kfree(dvb->bulk_urb->transfer_buffer); usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; return ret; } diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 4e7671a3a1e4a2408c02d1e0a8c3d57038a60571..d7397c0d7f8690b6ab29a7bd341d0f7eed68f3cf 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -278,7 +278,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command, dprintk("%s\n", __func__); - b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); + b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); if (!b) return -ENOMEM; diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index bfdf723553322788a32ed186aa5b21f51b87875e..230f50aa300029cb61d273dc4061b050553df607 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -332,6 +332,10 @@ static int usbvision_v4l2_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto unlock; + } if (usbvision->user) { err_code = -EBUSY; } else { @@ -395,6 +399,7 @@ static int usbvision_v4l2_open(struct file *file) static int usbvision_v4l2_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, "close"); @@ -409,9 +414,10 @@ static int usbvision_v4l2_close(struct file *file) usbvision_scratch_free(usbvision); usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); return 0; @@ -1095,6 +1101,11 @@ static int usbvision_radio_open(struct file *file) if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; + + if (usbvision->remove_pending) { + err_code = -ENODEV; + goto out; + } err_code = v4l2_fh_open(file); if (err_code) goto out; @@ -1127,6 +1138,7 @@ static int usbvision_radio_open(struct file *file) static int usbvision_radio_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); + int r; PDEBUG(DBG_IO, ""); @@ -1139,9 +1151,10 @@ static int usbvision_radio_close(struct file *file) usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; + r = usbvision->remove_pending; mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->remove_pending) { + if (r) { printk(KERN_INFO "%s: Final disconnect\n", __func__); v4l2_fh_release(file); usbvision_release(usbvision); @@ -1568,6 +1581,7 @@ static int usbvision_probe(struct usb_interface *intf, static void 
usbvision_disconnect(struct usb_interface *intf) { struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); + int u; PDEBUG(DBG_PROBE, ""); @@ -1584,13 +1598,14 @@ static void usbvision_disconnect(struct usb_interface *intf) v4l2_device_disconnect(&usbvision->v4l2_dev); usbvision_i2c_unregister(usbvision); usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ + u = usbvision->user; usb_put_dev(usbvision->dev); usbvision->dev = NULL; /* USB device is no more */ mutex_unlock(&usbvision->v4l2_lock); - if (usbvision->user) { + if (u) { printk(KERN_INFO "%s: In use, disconnect pending\n", __func__); wake_up_interruptible(&usbvision->wait_frame); diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 24487d5fd83bae6df89c466adcc1dfda9bdfb5db..d5347a2804fb5e63389f79df3f45a9edb87fb74a 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2021,6 +2021,21 @@ static int uvc_probe(struct usb_interface *intf, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); + /* Initialize the media device. */ +#ifdef CONFIG_MEDIA_CONTROLLER + dev->mdev.dev = &intf->dev; + strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); + if (udev->serial) + strscpy(dev->mdev.serial, udev->serial, + sizeof(dev->mdev.serial)); + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); + dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); + dev->mdev.driver_version = LINUX_VERSION_CODE; + media_device_init(&dev->mdev); + + dev->vdev.mdev = &dev->mdev; +#endif + /* Parse the Video Class control descriptor. */ if (uvc_parse_control(dev) < 0) { uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC " @@ -2041,20 +2056,7 @@ static int uvc_probe(struct usb_interface *intf, "linux-uvc-devel mailing list.\n"); } - /* Initialize the media device and register the V4L2 device. */ -#ifdef CONFIG_MEDIA_CONTROLLER - dev->mdev.dev = &intf->dev; - strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); - if (udev->serial) - strlcpy(dev->mdev.serial, udev->serial, - sizeof(dev->mdev.serial)); - strcpy(dev->mdev.bus_info, udev->devpath); - dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); - dev->mdev.driver_version = LINUX_VERSION_CODE; - media_device_init(&dev->mdev); - - dev->vdev.mdev = &dev->mdev; -#endif + /* Register the V4L2 device. 
*/ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) goto error; diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 48db922075e2adbcec880950edf7a9700827218b..08fa6400d2558f92887a14263eb5ca7247efadd5 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -947,7 +947,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev, if (!cnt) { rc = -ENODEV; pci_dev_busy = 1; - goto err_out; + goto err_out_int; } jm = kzalloc(sizeof(struct jmb38x_ms) diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 0556a9749dbe08ffa107a98415a1f05238c37015..1f0c2b594654eeb0524b5fed9e949477b3a97022 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona) if (ret != 0) goto err_ref; ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); - if (ret != 0) - goto err_pm; + if (ret != 0) { + pm_runtime_put_sync(arizona->dev); + goto err_ref; + } break; case ARIZONA_32KZ_MCLK2: ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]); @@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona) ARIZONA_CLK_32K_ENA); } -err_pm: - pm_runtime_put_sync(arizona->dev); err_ref: if (ret != 0) arizona->clk32k_ref--; diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 9ff243970e93ef1c025df40ca3e4474f59c371f5..5b41111e62fd1477335b593b499e343b7630b1dd 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev, info->mem = &pdev->resource[0]; info->irq = pdev->irq; + pdev->d3cold_delay = 0; + /* Probably it is enough to set this for iDMA capable devices only */ pci_set_master(pdev); diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 2d6e2c39278626d06bd45ed70e73d1a457a835cc..4a2fc59d59016220dd9b0ac6d05057f1353a94b3 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -155,12 +155,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata( pd->ono = irq_of_parse_and_map(dev->of_node, 1); - /* - * ToDo: the 'wakeup' member in the platform data is more of a linux - * specfic information. Hence, there is no binding for that yet and - * not parsed here. - */ - return pd; } @@ -248,7 +242,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c, */ /* MAX8997 has a power button input. 
*/ - device_init_wakeup(max8997->dev, pdata->wakeup); + device_init_wakeup(max8997->dev, true); return ret; diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index 6c16f170529f5bf80592407ea4df415356add929..75d52034f89da0490367f0837480c9de6eea2687 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, if (ret) goto out; - adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2; + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | + MC13XXX_ADC0_CHRGRAWDIV; adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC; if (channel > 7) diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index 60286adbd6a1ca703721b3357fc84abee1cc8d83..e56f0844b98de127845c34e33f3a6d3b6e9045c5 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -295,11 +295,24 @@ static int ti_tscadc_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data) +{ + return device_may_wakeup(dev); +} + static int __maybe_unused tscadc_suspend(struct device *dev) { struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev); regmap_write(tscadc->regmap, REG_SE, 0x00); + if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) { + u32 ctrl; + + regmap_read(tscadc->regmap, REG_CTRL, &ctrl); + ctrl &= ~(CNTRLREG_POWERDOWN); + ctrl |= CNTRLREG_TSCSSENB; + regmap_write(tscadc->regmap, REG_CTRL, ctrl); + } pm_runtime_put_sync(dev); return 0; diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 3e102cd6ed914d992152128423cce8cbab33e830..d08509cd978a45a73436935675aaca3dfc46777e 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -1026,8 +1026,6 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n void cxl_guest_remove_afu(struct cxl_afu *afu) { - pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); - if (!afu) return; diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index b642b4fd731b6b45335138630eb2da5fb357efcd..95584bffa4eaf4a4c2a706b9d2d6a38b1fce7950 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -298,7 +298,7 @@ static int genwqe_sgl_size(int num_pages) int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, void __user *user_addr, size_t user_size) { - int rc; + int ret = -ENOMEM; struct pci_dev *pci_dev = cd->pci_dev; sgl->fpage_offs = offset_in_page((unsigned long)user_addr); @@ -317,7 +317,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (get_order(sgl->sgl_size) > MAX_ORDER) { dev_err(&pci_dev->dev, "[%s] err: too much memory requested!\n", __func__); - return -ENOMEM; + return ret; } sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, @@ -325,7 +325,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, if (sgl->sgl == NULL) { dev_err(&pci_dev->dev, "[%s] err: no memory available!\n", __func__); - return -ENOMEM; + return ret; } /* Only use buffering on incomplete pages */ @@ -338,7 +338,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, /* Sync with user memory */ if (copy_from_user(sgl->fpage + sgl->fpage_offs, user_addr, sgl->fpage_size)) { - rc = -EFAULT; + ret = -EFAULT; goto err_out; } } @@ -351,7 +351,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, /* Sync with user memory */ if (copy_from_user(sgl->lpage, 
user_addr + user_size - sgl->lpage_size, sgl->lpage_size)) { - rc = -EFAULT; + ret = -EFAULT; goto err_out2; } } @@ -373,7 +373,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, sgl->sgl = NULL; sgl->sgl_dma_addr = 0; sgl->sgl_size = 0; - return -ENOMEM; + + return ret; } int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 01e0fa74c7bcbff9270bc4b698d007ef50aae2a8..9c5590eb151e2e8ca9cfc65c34a48bddf5633027 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -979,6 +979,12 @@ static void kgdbts_run_tests(void) int nmi_sleep = 0; int i; + verbose = 0; + if (strstr(config, "V1")) + verbose = 1; + if (strstr(config, "V2")) + verbose = 2; + ptr = strchr(config, 'F'); if (ptr) fork_test = simple_strtol(ptr + 1, NULL, 10); @@ -1062,13 +1068,6 @@ static int kgdbts_option_setup(char *opt) return -ENOSPC; } strcpy(config, opt); - - verbose = 0; - if (strstr(config, "V1")) - verbose = 1; - if (strstr(config, "V2")) - verbose = 2; - return 0; } @@ -1080,9 +1079,6 @@ static int configure_kgdbts(void) if (!strlen(config) || isspace(config[0])) goto noconfig; - err = kgdbts_option_setup(config); - if (err) - goto noconfig; final_ack = 0; run_plant_and_detach_test(1); diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c index cac3bcc308a7ebfc99bc40522389587060b251f7..7bb929f05d85227b1f6b16f5d037ac58ad3e2200 100644 --- a/drivers/misc/mic/scif/scif_fence.c +++ b/drivers/misc/mic/scif/scif_fence.c @@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val) dma_fail: if (!x100) dma_pool_free(ep->remote_dev->signal_pool, status, - status->src_dma_addr); + src - offsetof(struct scif_status, val)); alloc_fail: return err; } diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 67d8e33246fab7cb1ba9507eb33a9e0cd4c3db09..a64e44589c732f1d01fb26f6ea52dbb0b8d382c0 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -1,7 +1,7 @@ /* * QTI Secure Execution Environment Communicator (QSEECOM) driver * - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1814,6 +1814,16 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, if (ptr_svc->svc.listener_id == lstnr) { ptr_svc->listener_in_use = true; ptr_svc->rcv_req_flag = 1; + rc = msm_ion_do_cache_op(qseecom.ion_clnt, + ptr_svc->ihandle, + ptr_svc->sb_virt, + ptr_svc->sb_length, + ION_IOC_INV_CACHES); + if (rc) { + pr_err("cache opp failed %d\n", rc); + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } wake_up_interruptible(&ptr_svc->rcv_req_wq); break; } @@ -2142,6 +2152,16 @@ static int __qseecom_reentrancy_process_incomplete_cmd( if (ptr_svc->svc.listener_id == lstnr) { ptr_svc->listener_in_use = true; ptr_svc->rcv_req_flag = 1; + rc = msm_ion_do_cache_op(qseecom.ion_clnt, + ptr_svc->ihandle, + ptr_svc->sb_virt, + ptr_svc->sb_length, + ION_IOC_INV_CACHES); + if (rc) { + pr_err("cache opp failed %d\n", rc); + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } wake_up_interruptible(&ptr_svc->rcv_req_wq); break; } @@ -5043,8 +5063,10 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, } perf_enabled = true; } - if (!strcmp(data->client.app_name, "securemm")) + if (!strcmp(data->client.app_name, "securemm") || + !strcmp(data->client.app_name, "bgapp")) { data->use_legacy_cmd = true; + } ret = __qseecom_send_cmd(data, &req); data->use_legacy_cmd = false; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 6f9535e5e584b4469f0fd01bc5c2ad625d255e50..7fc6ce3811421eabd31f9b0fee305085a94aadf3 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -870,6 +870,7 @@ static void msdc_start_command(struct msdc_host *host, WARN_ON(host->cmd); host->cmd = cmd; + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); if (!msdc_cmd_is_ready(host, mrq, cmd)) return; @@ -881,7 +882,6 @@ static void msdc_start_command(struct msdc_host *host, cmd->error = 0; rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); - mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); writel(cmd->arg, host->base + SDC_ARG); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 2ff6140ea0b752ae2348e019fa0c4db200f9d5f5..7f7af312e7ad34663fd403817da8f84304921595 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -318,7 +318,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); /* HS200 is broken at this moment */ - host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200; + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; ret = sdhci_add_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index fb20e826699239d61f404bfa62eb798ba9221f87..50f6b00deb021d4c8a0e9f4a889d529c02f0ec9b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1929,7 +1929,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ctrl_2 |= SDHCI_CTRL_UHS_SDR104; else if (timing == MMC_TIMING_UHS_SDR12) ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (timing == MMC_TIMING_UHS_SDR25) + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) ctrl_2 |= SDHCI_CTRL_UHS_SDR25; else if (timing == MMC_TIMING_UHS_SDR50) ctrl_2 |= SDHCI_CTRL_UHS_SDR50; diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 
de35a2a362f9e01751ad35218f98729beb80dc85..8725e406a9eb9d61718174a683915535c6576c49 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1624,29 +1624,35 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ + /* + * We check "time_after" and "!chip_good" before checking + * "chip_good" to avoid the failure due to scheduling. + */ + if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) { xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); xip_disable(map, chip, adr); + ret = -EIO; break; } - if (chip_ready(map, adr)) + if (chip_good(map, adr, datum)) break; /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); } + /* Did we succeed? */ - if (!chip_good(map, adr, datum)) { + if (ret) { /* reset on all failures. */ map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_RETRIES) + if (++retry_cnt <= MAX_RETRIES) { + ret = 0; goto retry; - - ret = -EIO; + } } xip_enable(map, chip, adr); op_done: diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 3fad35942895c0186dfa08d22ab655fc0a4ed37f..7a716bed115801522d941ab1fee8e5a328640812 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -29,7 +29,6 @@ struct of_flash_list { struct mtd_info *mtd; struct map_info map; - struct resource *res; }; struct of_flash { @@ -54,18 +53,10 @@ static int of_flash_remove(struct platform_device *dev) mtd_concat_destroy(info->cmtd); } - for (i = 0; i < info->list_size; i++) { + for (i = 0; i < info->list_size; i++) if (info->list[i].mtd) map_destroy(info->list[i].mtd); - if (info->list[i].map.virt) - iounmap(info->list[i].map.virt); - - if (info->list[i].res) { - release_resource(info->list[i].res); - kfree(info->list[i].res); - } - } return 0; } @@ -223,10 +214,11 @@ static int of_flash_probe(struct platform_device *dev) err = -EBUSY; res_size = resource_size(&res); - info->list[i].res = request_mem_region(res.start, res_size, - dev_name(&dev->dev)); - if (!info->list[i].res) + info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res); + if (IS_ERR(info->list[i].map.virt)) { + err = PTR_ERR(info->list[i].map.virt); goto err_out; + } err = -ENXIO; width = of_get_property(dp, "bank-width", NULL); @@ -247,15 +239,6 @@ static int of_flash_probe(struct platform_device *dev) return err; } - err = -ENOMEM; - info->list[i].map.virt = ioremap(info->list[i].map.phys, - info->list[i].map.size); - if (!info->list[i].map.virt) { - dev_err(&dev->dev, "Failed to ioremap() flash" - " region\n"); - goto err_out; - } - simple_map_init(&info->list[i].map); /* diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c index 5223a2182ee44dfbfbda6f578266390f52d4f8b6..ca95ae00215ecc00e66cd244185fdb08e4f90e7b 100644 --- a/drivers/mtd/nand/mtk_nand.c +++ b/drivers/mtd/nand/mtk_nand.c @@ -810,19 +810,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, return ret & NAND_STATUS_FAIL ? 
-EIO : 0; } -static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start, + u32 sectors) { struct nand_chip *chip = mtd_to_nand(mtd); struct mtk_nfc *nfc = nand_get_controller_data(chip); struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); struct mtk_ecc_stats stats; + u32 reg_size = mtk_nand->fdm.reg_size; int rc, i; rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; if (rc) { memset(buf, 0xff, sectors * chip->ecc.size); for (i = 0; i < sectors; i++) - memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); + memset(oob_ptr(chip, start + i), 0xff, reg_size); return 0; } @@ -842,7 +844,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u32 spare = mtk_nand->spare_per_sector; u32 column, sectors, start, end, reg; dma_addr_t addr; - int bitflips; + int bitflips = 0; size_t len; u8 *buf; int rc; @@ -910,14 +912,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, if (rc < 0) { dev_err(nfc->dev, "subpage done timeout\n"); bitflips = -EIO; - } else { - bitflips = 0; - if (!raw) { - rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); - bitflips = rc < 0 ? -ETIMEDOUT : - mtk_nfc_update_ecc_stats(mtd, buf, sectors); - mtk_nfc_read_fdm(chip, start, sectors); - } + } else if (!raw) { + rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); + bitflips = rc < 0 ? -ETIMEDOUT : + mtk_nfc_update_ecc_stats(mtd, buf, start, sectors); + mtk_nfc_read_fdm(chip, start, sectors); } dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 442ce619b3b6d5cb8a65e2f8740e30e01e750aed..d6c013f93b8c020edbcbda4ba3c0bcff00c40ecd 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -480,7 +480,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset) /* initiate DMA transfer */ if (flctl->chan_fifo0_rx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0) + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0) goto convert; /* DMA success */ /* do polling transfer */ @@ -539,7 +539,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, /* initiate DMA transfer */ if (flctl->chan_fifo0_tx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0) + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0) return; /* DMA success */ /* do polling transfer */ diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 93ceea4f27d5731f865c57be71f69c35b46d274f..ab0120c0bee2dbe74319f83cbe963219aee76e8e 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -834,9 +834,21 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, int err = 0; struct ubi_ainf_peb *aeb, *tmp_aeb; - if (!list_empty(&ai->free)) { - aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list); + list_for_each_entry_safe(aeb, tmp_aeb, &ai->free, u.list) { list_del(&aeb->u.list); + if (aeb->ec == UBI_UNKNOWN) { + ubi_err(ubi, "PEB %d in freelist has unknown EC", + aeb->pnum); + aeb->ec = ai->mean_ec; + } + err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1); + if (err) { + ubi_err(ubi, "Erase failed for PEB %d in freelist", + aeb->pnum); + list_add(&aeb->u.list, &ai->erase); + continue; + } + aeb->ec += 1; dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec); return aeb; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 
541c17917a4dfc14fe39803d0ef25d19668553d5..8d28b220541d450b25b0ae177f26318a35a5ed29 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -125,6 +125,9 @@ struct class ubi_class = { static ssize_t dev_attribute_show(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t dev_attribute_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); /* UBI device attributes (correspond to files in '//class/ubi/ubiX') */ static struct device_attribute dev_eraseblock_size = @@ -151,6 +154,13 @@ static struct device_attribute dev_mtd_num = __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); static struct device_attribute dev_ro_mode = __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_mtd_trigger_scrub = + __ATTR(scrub_all, 0644, + dev_attribute_show, dev_attribute_store); +static struct device_attribute dev_mtd_max_scrub_sqnum = + __ATTR(scrub_max_sqnum, 0444, dev_attribute_show, NULL); +static struct device_attribute dev_mtd_min_scrub_sqnum = + __ATTR(scrub_min_sqnum, 0444, dev_attribute_show, NULL); /** * ubi_volume_notify - send a volume change notification. @@ -343,6 +353,17 @@ int ubi_major2num(int major) return ubi_num; } +static unsigned long long get_max_sqnum(struct ubi_device *ubi) +{ + unsigned long long max_sqnum; + + spin_lock(&ubi->ltree_lock); + max_sqnum = ubi->global_sqnum - 1; + spin_unlock(&ubi->ltree_lock); + + return max_sqnum; +} + /* "Show" method for files in '//class/ubi/ubiX/' */ static ssize_t dev_attribute_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -389,6 +410,15 @@ static ssize_t dev_attribute_show(struct device *dev, ret = sprintf(buf, "%d\n", ubi->mtd->index); else if (attr == &dev_ro_mode) ret = sprintf(buf, "%d\n", ubi->ro_mode); + else if (attr == &dev_mtd_trigger_scrub) + ret = snprintf(buf, PAGE_SIZE, "%d\n", + atomic_read(&ubi->scrub_work_count)); + else if (attr == &dev_mtd_max_scrub_sqnum) + ret = snprintf(buf, PAGE_SIZE, "%llu\n", + get_max_sqnum(ubi)); + else if (attr == &dev_mtd_min_scrub_sqnum) + ret = snprintf(buf, PAGE_SIZE, "%llu\n", + ubi_wl_scrub_get_min_sqnum(ubi)); else ret = -EINVAL; @@ -409,10 +439,45 @@ static struct attribute *ubi_dev_attrs[] = { &dev_bgt_enabled.attr, &dev_mtd_num.attr, &dev_ro_mode.attr, + &dev_mtd_trigger_scrub.attr, + &dev_mtd_max_scrub_sqnum.attr, + &dev_mtd_min_scrub_sqnum.attr, NULL }; ATTRIBUTE_GROUPS(ubi_dev); +static ssize_t dev_attribute_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret = count; + struct ubi_device *ubi; + unsigned long long scrub_sqnum; + + ubi = container_of(dev, struct ubi_device, dev); + ubi = ubi_get_device(ubi->ubi_num); + if (!ubi) + return -ENODEV; + + if (attr == &dev_mtd_trigger_scrub) { + if (kstrtoull(buf, 10, &scrub_sqnum)) { + ret = -EINVAL; + goto out; + } + if (!ubi->lookuptbl) { + pr_err("lookuptbl is null"); + goto out; + } + ret = ubi_wl_scrub_all(ubi, scrub_sqnum); + if (ret == 0) + ret = count; + } + +out: + ubi_put_device(ubi); + return ret; +} + static void dev_release(struct device *dev) { struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index b6fb8f945c21919a28436ead4f4368fef672e907..f0361651832ceaa5bb730ad219298c5ac9734749 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -1105,6 +1105,16 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, dbg_io("write VID header to PEB %d", pnum); 
ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + /* + * Re-erase the PEB before using it. This should minimize any issues + * from decay of charge in this block. + */ + if (ubi->wl_is_inited) { + err = ubi_wl_re_erase_peb(ubi, pnum); + if (err) + return err; + } + err = self_check_peb_ec_hdr(ubi, pnum); if (err) return err; @@ -1123,6 +1133,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_alsize); + if (!err && ubi->wl_is_inited) + ubi_wl_update_peb_sqnum(ubi, pnum, vid_hdr); return err; } diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 697dbcba7371b13b561289f152c20cedcf283529..13efd641a0c2d3cc4abb7fbb6681162a7d3ff2d3 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -183,6 +183,8 @@ struct ubi_vid_io_buf { * @u.list: link in the protection queue * @ec: erase counter * @pnum: physical eraseblock number + * @tagged_scrub_all: if the entry is tagged for scrub all + * @sqnum: The sequence number of the vol header. * * This data structure is used in the WL sub-system. Each physical eraseblock * has a corresponding &struct wl_entry object which may be kept in different @@ -195,6 +197,8 @@ struct ubi_wl_entry { } u; int ec; int pnum; + unsigned int tagged_scrub_all:1; + unsigned long long sqnum; }; /** @@ -624,6 +628,9 @@ struct ubi_device { struct task_struct *bgt_thread; int thread_enabled; char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; + bool scrub_in_progress; + atomic_t scrub_work_count; + int wl_is_inited; /* I/O sub-system's stuff */ long long flash_size; @@ -919,6 +926,12 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, int ubi_is_erase_work(struct ubi_work *wrk); void ubi_refill_pools(struct ubi_device *ubi); int ubi_ensure_anchor_pebs(struct ubi_device *ubi); +ssize_t ubi_wl_scrub_all(struct ubi_device *ubi, + unsigned long long scrub_sqnum); +void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr); +unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi); +int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum); /* io.c */ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index a8f74d9bba4ff82c6d3430e3656f90dc7d5c4b47..5c0332e311382112950334a5e4d6d8388bc7ff31 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -102,6 +102,7 @@ #include #include #include +#include #include "ubi.h" #include "wl.h" @@ -487,6 +488,42 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, return err; } +int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum) +{ + int err; + struct ubi_wl_entry *e; + struct ubi_ec_hdr *ec_hdr; + + spin_lock(&ubi->wl_lock); + e = ubi->lookuptbl[pnum]; + spin_unlock(&ubi->wl_lock); + + dbg_wl("Re-erase PEB %d, EC %u", e->pnum, e->ec); + + err = self_check_ec(ubi, e->pnum, e->ec); + if (err) + return -EINVAL; + + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); + if (!ec_hdr) + return -ENOMEM; + + err = ubi_io_sync_erase(ubi, e->pnum, 0); + if (err < 0) + goto out_free; + + dbg_wl("re-erased PEB %d, EC %u", e->pnum, e->ec); + + ec_hdr->ec = cpu_to_be64(e->ec); + + err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); + if (err) + goto out_free; +out_free: + kfree(ec_hdr); + return err; +} + /** * serve_prot_queue - check if it is time to stop protecting PEBs. 
* @ubi: UBI device description object @@ -862,9 +899,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } /* The PEB has been successfully moved */ - if (scrubbing) - ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", - e1->pnum, vol_id, lnum, e2->pnum); + if (scrubbing) { + spin_lock(&ubi->wl_lock); + if (e1->tagged_scrub_all) { + WARN_ON(atomic_read(&ubi->scrub_work_count) <= 0); + atomic_dec(&ubi->scrub_work_count); + e1->tagged_scrub_all = 0; + e2->tagged_scrub_all = 0; + } else { + ubi_msg(ubi, + "scrubbed PEB %d (LEB %d:%d),data moved to PEB %d", + e1->pnum, vol_id, lnum, e2->pnum); + } + spin_unlock(&ubi->wl_lock); + } ubi_free_vid_buf(vidb); spin_lock(&ubi->wl_lock); @@ -1226,6 +1274,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + e->sqnum = UBI_UNKNOWN; if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to @@ -1262,6 +1311,20 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, } else if (in_wl_tree(e, &ubi->scrub)) { self_check_in_wl_tree(ubi, e, &ubi->scrub); rb_erase(&e->u.rb, &ubi->scrub); + + /* + * Since this PEB has been put we dont need to worry + * about it anymore + */ + if (e->tagged_scrub_all) { + int wrk_count; + + wrk_count = atomic_read(&ubi->scrub_work_count); + WARN_ON(wrk_count <= 0); + + atomic_dec(&ubi->scrub_work_count); + e->tagged_scrub_all = 0; + } } else if (in_wl_tree(e, &ubi->erroneous)) { self_check_in_wl_tree(ubi, e, &ubi->erroneous); rb_erase(&e->u.rb, &ubi->erroneous); @@ -1293,6 +1356,197 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, return err; } +/** + * ubi_wl_scrub_get_min_sqnum - Return the minimum sqnum of the used/pq/scrub. + * @ubi: UBI device description object + * + * This function returns the minimum sqnum of the PEB that are currently in use. + * + * Return the min sqnum if there are any used PEB's otherwise return ~(0) + * + */ +unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi) +{ + int i; + struct ubi_wl_entry *e, *tmp; + struct rb_node *node; + unsigned long long min_sqnum = ~0; + + spin_lock(&ubi->wl_lock); + + /* Go through the pq list */ + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { + list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { + if (e->sqnum < min_sqnum) + min_sqnum = e->sqnum; + } + } + + /* Go through used PEB tree */ + for (node = rb_first(&ubi->used); node; node = rb_next(node)) { + e = rb_entry(node, struct ubi_wl_entry, u.rb); + self_check_in_wl_tree(ubi, e, &ubi->used); + if (e->sqnum < min_sqnum) + min_sqnum = e->sqnum; + } + /* Go through scrub PEB tree */ + for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { + e = rb_entry(node, struct ubi_wl_entry, u.rb); + self_check_in_wl_tree(ubi, e, &ubi->scrub); + if (e->sqnum < min_sqnum) + min_sqnum = e->sqnum; + } + spin_unlock(&ubi->wl_lock); + return min_sqnum; +} + +/** + * ubi_wl_update_peb_sqnum - Update the vol hdr sqnum of the PEB. + * @pnum: The PEB number. + * @vid_hdr: The vol hdr being written to the PEB. 
+ * + */ +void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr) +{ + struct ubi_wl_entry *e; + + spin_lock(&ubi->wl_lock); + e = ubi->lookuptbl[pnum]; + e->sqnum = be64_to_cpu(vid_hdr->sqnum); + e->tagged_scrub_all = 0; + spin_unlock(&ubi->wl_lock); +} + +static int is_ubi_readonly(struct ubi_device *ubi) +{ + int is_readonly = 0; + + spin_lock(&ubi->wl_lock); + if (ubi->ro_mode || !ubi->thread_enabled || + ubi_dbg_is_bgt_disabled(ubi)) + is_readonly = 1; + spin_unlock(&ubi->wl_lock); + + return is_readonly; +} + +/** + * ubi_wl_scan_all - Scan all PEB's + * @ubi: UBI device description object + * @scrub_sqnum: The max seqnum of the PEB to scrub from the used/pq lists + * + * This function schedules all device PEBs for scrubbing if the sqnum of the + * vol hdr is less than the sqnum in the trigger. + * + * Return 0 in case of success, (negative) error code otherwise + * + */ +ssize_t ubi_wl_scrub_all(struct ubi_device *ubi, unsigned long long scrub_sqnum) +{ + struct rb_node *node; + struct ubi_wl_entry *e, *tmp; + int scrub_count = 0; + int total_scrub_count = 0; + int err, i; + + if (!ubi->lookuptbl) { + ubi_err(ubi, "lookuptbl is null"); + return -ENOENT; + } + + if (is_ubi_readonly(ubi)) { + ubi_err(ubi, "Cannot *Initiate* scrub:background thread disabled or readonly!"); + return -EROFS; + } + + /* Wait for all currently running work to be done! */ + down_write(&ubi->work_sem); + spin_lock(&ubi->wl_lock); + ubi_msg(ubi, "Scrub triggered sqnum = %llu!", scrub_sqnum); + + if (ubi->scrub_in_progress) { + ubi_err(ubi, "Scrub already in progress, ignoring the trigger"); + spin_unlock(&ubi->wl_lock); + up_write(&ubi->work_sem); /* Allow new work to start. */ + return -EBUSY; + } + ubi->scrub_in_progress = true; + + /* Go through scrub PEB tree and count pending */ + for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { + e = rb_entry(node, struct ubi_wl_entry, u.rb); + self_check_in_wl_tree(ubi, e, &ubi->scrub); + e->tagged_scrub_all = 1; + total_scrub_count++; + } + + /* Move all used pebs to scrub tree */ + node = rb_first(&ubi->used); + while (node != NULL) { + e = rb_entry(node, struct ubi_wl_entry, u.rb); + self_check_in_wl_tree(ubi, e, &ubi->used); + node = rb_next(node); + + if (e->sqnum > scrub_sqnum) + continue; + rb_erase(&e->u.rb, &ubi->used); + wl_tree_add(e, &ubi->scrub); + e->tagged_scrub_all = 1; + scrub_count++; + total_scrub_count++; + } + + /* Move all protected pebs to scrub tree */ + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { + list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { + + if (e->sqnum > scrub_sqnum) + continue; + + list_del(&e->u.list); + wl_tree_add(e, &ubi->scrub); + e->tagged_scrub_all = 1; + scrub_count++; + total_scrub_count++; + } + } + + atomic_set(&ubi->scrub_work_count, total_scrub_count); + spin_unlock(&ubi->wl_lock); + up_write(&ubi->work_sem); /* Allow new work to start. */ + + /* + * Technically scrubbing is the same as wear-leveling, so it is done + * by the WL worker. 
+ */ + err = ensure_wear_leveling(ubi, 0); + if (err) { + ubi_err(ubi, "Failed to start the WL worker err =%d", err); + return err; + } + ubi_msg(ubi, "Scheduled %d PEB's for scrubbing!", scrub_count); + ubi_msg(ubi, "Total PEB's for scrub = %d", total_scrub_count); + + /* Wait for scrub to finish */ + while (atomic_read(&ubi->scrub_work_count) > 0) { + /* Poll every second to check if the scrub work is done */ + msleep(1000); + + if (is_ubi_readonly(ubi)) { + ubi_err(ubi, "Cannot *Complete* scrub:background thread disabled or readonly!"); + return -EROFS; + } + wake_up_process(ubi->bgt_thread); + } + + spin_lock(&ubi->wl_lock); + ubi->scrub_in_progress = false; + spin_unlock(&ubi->wl_lock); + ubi_msg(ubi, "Done scrubbing %d PEB's!", scrub_count); + return 0; +} + /** * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. * @ubi: UBI device description object @@ -1622,6 +1876,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) e->pnum = aeb->pnum; e->ec = aeb->ec; + e->tagged_scrub_all = 0; + e->sqnum = aeb->sqnum; ubi_assert(e->ec >= 0); wl_tree_add(e, &ubi->free); @@ -1644,6 +1900,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) e->pnum = aeb->pnum; e->ec = aeb->ec; + e->tagged_scrub_all = 0; + e->sqnum = aeb->sqnum; ubi->lookuptbl[e->pnum] = e; if (!aeb->scrub) { @@ -1724,6 +1982,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (err) goto out_free; + ubi->wl_is_inited = 1; return 0; out_free: diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 6ea963e3b89a1ab1c78ccb4d63c99a16e1bfbe0b..85ffd0561827ee235c8b169d5c843f20b0a37882 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -1009,31 +1009,34 @@ EXPORT_SYMBOL(arcnet_interrupt); static void arcnet_rx(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); - struct archdr pkt; + union { + struct archdr pkt; + char buf[512]; + } rxdata; struct arc_rfc1201 *soft; int length, ofs; - soft = &pkt.soft.rfc1201; + soft = &rxdata.pkt.soft.rfc1201; - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE); - if (pkt.hard.offset[0]) { - ofs = pkt.hard.offset[0]; + lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE); + if (rxdata.pkt.hard.offset[0]) { + ofs = rxdata.pkt.hard.offset[0]; length = 256 - ofs; } else { - ofs = pkt.hard.offset[1]; + ofs = rxdata.pkt.hard.offset[1]; length = 512 - ofs; } /* get the full header, if possible */ - if (sizeof(pkt.soft) <= length) { - lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft)); + if (sizeof(rxdata.pkt.soft) <= length) { + lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft)); } else { - memset(&pkt.soft, 0, sizeof(pkt.soft)); + memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft)); lp->hw.copy_from_card(dev, bufnum, ofs, soft, length); } arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n", - bufnum, pkt.hard.source, pkt.hard.dest, length); + bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length); dev->stats.rx_packets++; dev->stats.rx_bytes += length + ARC_HDR_SIZE; @@ -1042,13 +1045,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum) if (arc_proto_map[soft->proto]->is_ip) { if (BUGLVL(D_PROTO)) { struct ArcProto - *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]], + *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]], *newp = arc_proto_map[soft->proto]; if (oldp != newp) { arc_printk(D_PROTO, dev, "got protocol %02Xh; 
encap for host %02Xh is now '%c' (was '%c')\n", - soft->proto, pkt.hard.source, + soft->proto, rxdata.pkt.hard.source, newp->suffix, oldp->suffix); } } @@ -1057,10 +1060,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum) lp->default_proto[0] = soft->proto; /* in striking contrast, the following isn't a hack. */ - lp->default_proto[pkt.hard.source] = soft->proto; + lp->default_proto[rxdata.pkt.hard.source] = soft->proto; } /* call the protocol-specific receiver. */ - arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length); + arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length); } static void null_rx(struct net_device *dev, int bufnum, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8820fb1aec5b4fb85abbdeda6932af1870d41b54..d52fd842ef1fee908d8f03fee798cdf4faeb47aa 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1759,7 +1759,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) slave_disable_netpoll(new_slave); err_close: - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_restore_mac: @@ -1960,7 +1961,8 @@ static int __bond_release_one(struct net_device *bond_dev, dev_set_mtu(slave_dev, slave->original_mtu); - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; bond_free_slave(slave); @@ -3963,7 +3965,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) * this to-be-skipped slave to send a packet out. */ old_arr = rtnl_dereference(bond->slave_arr); - for (idx = 0; idx < old_arr->count; idx++) { + for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) { if (skipslave == old_arr->arr[idx]) { old_arr->arr[idx] = old_arr->arr[old_arr->count-1]; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3dccd3200d5d834f13ad036c290c51e3091052e..7d35f6737499cb5cdf48cea3e4cff66d1ca4f1a8 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -97,6 +97,9 @@ #define BTR_TSEG2_SHIFT 12 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) +/* interrupt register */ +#define INT_STS_PENDING 0x8000 + /* brp extension register */ #define BRP_EXT_BRPE_MASK 0x0f #define BRP_EXT_BRPE_SHIFT 0 @@ -1029,10 +1032,16 @@ static int c_can_poll(struct napi_struct *napi, int quota) u16 curr, last = priv->last_status; int work_done = 0; - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); - /* Ack status on C_CAN. D_CAN is self clearing */ - if (priv->type != BOSCH_D_CAN) - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + /* Only read the status register if a status interrupt was pending */ + if (atomic_xchg(&priv->sie_pending, 0)) { + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); + /* Ack status on C_CAN. D_CAN is self clearing */ + if (priv->type != BOSCH_D_CAN) + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + } else { + /* no change detected ... 
*/ + curr = last; + } /* handle state changes */ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { @@ -1083,10 +1092,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); + int reg_int; - if (!priv->read_reg(priv, C_CAN_INT_REG)) + reg_int = priv->read_reg(priv, C_CAN_INT_REG); + if (!reg_int) return IRQ_NONE; + /* save for later use */ + if (reg_int & INT_STS_PENDING) + atomic_set(&priv->sie_pending, 1); + /* disable all interrupts and schedule the NAPI */ c_can_irq_control(priv, false); napi_schedule(&priv->napi); diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 8acdc7fa4792f24438cb0cd84d73b44f0c46f7d8..d5567a7c1c6d4652a04f8135b381387e3a2e5304 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -198,6 +198,7 @@ struct c_can_priv { struct net_device *dev; struct device *device; atomic_t tx_active; + atomic_t sie_pending; unsigned long tx_dir; int last_status; u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index baef09b9449f9b864163e070b09d64c82091f28e..6b866d0451b21d69cd7ded47f1d959c3a31ce5b4 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -923,6 +923,7 @@ static int flexcan_chip_start(struct net_device *dev) reg_mecr = flexcan_read(®s->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; flexcan_write(reg_mecr, ®s->mecr); + reg_mecr |= FLEXCAN_MECR_ECCDIS; reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); flexcan_write(reg_mecr, ®s->mecr); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index eb7173713bbcb0c339ae6ccbf99fc8014fb17133..a2c4048c07bec8e7640c836c71848b427a314f52 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -613,6 +613,7 @@ static int slcan_open(struct tty_struct *tty) sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + free_netdev(sl->dev); err_exit: rtnl_unlock(); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index d8c448beab24d093494ca16caf73612d86f59360..ec0b3d0258676ce3bb350208974109574efc70af 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -627,7 +627,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, static int mcp251x_hw_reset(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); - u8 reg; + unsigned long timeout; int ret; /* Wait for oscillator startup timer after power up */ @@ -641,10 +641,19 @@ static int mcp251x_hw_reset(struct spi_device *spi) /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); - reg = mcp251x_read_reg(spi, CANSTAT); - if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF) - return -ENODEV; - + /* Wait for reset to finish */ + timeout = jiffies + HZ; + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) != + CANCTRL_REQOP_CONF) { + usleep_range(MCP251X_OST_DELAY_MS * 1000, + MCP251X_OST_DELAY_MS * 1000 * 2); + + if (time_after(jiffies, timeout)) { + dev_err(&spi->dev, + "MCP251x didn't enter in conf mode after reset\n"); + return -EBUSY; + } + } return 0; } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 5d5012337d9eea9a6139dae399ba373a95217dfc..014b9ae3dc17aebf47e9d3fddc032c542cf0b73e 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -632,6 +632,7 @@ static int 
gs_can_open(struct net_device *netdev) rc); usb_unanchor_urb(urb); + usb_free_urb(urb); break; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 838545ce468d1bc9af34b8d451107f966fa5972f..e626c2afbbb11e94b050e5294c1cd75fe22f46d6 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -108,7 +108,7 @@ struct pcan_usb_msg_context { u8 *end; u8 rec_cnt; u8 rec_idx; - u8 rec_data_idx; + u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; @@ -552,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { - int err = pcan_usb_decode_ts(mc, !mc->rec_idx); + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; + + /* Next packet in the buffer will have a timestamp on a single + * byte + */ + mc->rec_ts_idx++; } switch (f) { @@ -638,10 +643,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) cf->can_dlc = get_can_dlc(rec_len); - /* first data packet timestamp is a word */ - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) + /* Only first packet timestamp is a word */ + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; + /* Next packet in the buffer will have a timestamp on a single byte */ + mc->rec_ts_idx++; + /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { @@ -695,7 +703,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); - mc.rec_data_idx++; } } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index ce0a352a5eaabf17e2d395489bb969c815826f7b..6cd4317fe94dfc6a8c5bae02a8f2bf0e775d1324 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -774,7 +774,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_free_candev; diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 27861c417c9404c9c3df841daac95015978f3a69..3e44164736079d467644b63780002bbade28f933 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -1007,9 +1007,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); - free_candev(priv->netdev); - unlink_all_urbs(priv); + free_candev(priv->netdev); } } diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig index 99b30353541ab7851cef3e5a9cdd10448f274a91..9e87d7b8360f59575b824ee734298409fcffbcb0 100644 --- a/drivers/net/ethernet/amazon/Kconfig +++ b/drivers/net/ethernet/amazon/Kconfig @@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON config ENA_ETHERNET tristate "Elastic Network Adapter (ENA) support" - depends on (PCI_MSI && X86) + depends on PCI_MSI && !CPU_BIG_ENDIAN ---help--- This driver supports Elastic Network Adapter (ENA)" diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 
fcdf5dda448f9c7acf5a0a537cbbcb0de76b09df..77d99c532917c5d0df0dd0dfcb32b4e08ec4a2af 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev) /* * Transmit a packet */ -static int +static netdev_tx_t am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index d2bc8e5dcd23b21dab55b377961d358b55a29ede..35a9f252ceb6c8ea1a8a04129e6c443a1a058ca5 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr *init_rec ); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -770,7 +771,8 @@ static void lance_tx_timeout (struct net_device *dev) /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); struct lance_ioreg *IO = lp->iobase; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 9e80a76c3dfe1d274137402f5b8d2e1a91cc44b3..76393ae4cf0aa1beb6b9c33090c7374fb2d1b924 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -893,7 +893,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_regs *ll = lp->ll; diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 3d8c6b2cdea4ca5906e0abb09a0523a5aa03b74c..09271665712da205dc13f39508c20df14ea3e8f8 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -235,7 +235,8 @@ struct lance_private { static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); @@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev ) } -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +static netdev_tx_t +lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, len; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 3153465d4d02f9cdc55c2b8e19c74fdb04c5c632..acbbe4e5a2f8c57c336236cc91238dc6bdf4d654 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ 
b/drivers/net/ethernet/amd/sunlance.c @@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int entry, skblen, len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1e4e8b245cd55e8cc6cf6a001a168b73d98ca2ba..1df7f5da8411f358416ead234a7ab47e7b57b8d6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1390,7 +1390,7 @@ static int xgbe_close(struct net_device *netdev) return 0; } -static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -1399,7 +1399,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) struct xgbe_ring *ring; struct xgbe_packet_data *packet; struct netdev_queue *txq; - int ret; + netdev_tx_t ret; DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c770ca37c9b21eb77af38e404489653109aae711..a7d30731d376f15b73ff7b732691154f23aa03f6 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -261,6 +261,9 @@ static int emac_rockchip_remove(struct platform_device *pdev) if (priv->regulator) regulator_disable(priv->regulator); + if (priv->soc_data->need_div_macclk) + clk_disable_unprepare(priv->macclk); + free_netdev(ndev); return err; } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c4078401b7ded15a8084cba093c8cb1f88df1754..900f2f706cbc74c3721a29e056a974adaa93f01d 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -571,12 +571,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) /* * tx request callback */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcm_enet_priv *priv; struct bcm_enet_desc *desc; u32 len_stat; - int ret; + netdev_tx_t ret; priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a9681b191304a91b413e67f84df148208cb57fa3..ce8a777b1e975502912dcb5ae7396799b243f5c0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3540,6 +3540,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) */ static void bnx2x_config_mf_bw(struct bnx2x *bp) { + /* Workaround for MFW bug. + * MFW is not supposed to generate BW attention in + * single function mode. 
+ */ + if (!IS_MF(bp)) { + DP(BNX2X_MSG_MCP, + "Ignoring MF BW config in single function mode\n"); + return; + } + if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_link_sync_notify(bp); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1bb923e3a2bc4d368f5e18823b92b74b9078d297..a234044805977822958cbba81f716b2e69213294 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1078,7 +1078,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, break; } - return 0; + return ret; } static void bcmgenet_power_up(struct bcmgenet_priv *priv, @@ -1914,6 +1914,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) */ if (priv->internal_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -2531,6 +2533,10 @@ static void bcmgenet_irq_task(struct work_struct *work) bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); } + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) + phy_init_hw(priv->dev->phydev); + /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) phy_mac_interrupt(priv->phydev, @@ -2627,8 +2633,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } /* all other interested interrupts handled in bottom half */ - status &= (UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_MPD_R); + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R); if (status) { /* Save irq status for bottom-half processing. */ spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 3f8858db12eb3aae8e2a94b7e0fd8da1a4a281fd..dcf10ea60e7fe831d0cfe1992cdc7fd6f6644872 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -362,6 +362,7 @@ struct bcmgenet_mib_counters { #define EXT_ENERGY_DET_MASK (1 << 12) #define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) #define RGMII_LINK (1 << 4) #define OOB_DISABLE (1 << 5) #define RGMII_MODE_EN (1 << 6) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9bd90a7c4d40ffb8748129aa1a009c5557c1ac51..b0b9feeb173b333ddb2649b3922a1afed89f1533 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -328,7 +328,11 @@ int bcmgenet_mii_config(struct net_device *dev) */ if (priv->ext_phy) { reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } @@ -342,11 +346,12 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; struct phy_device *phydev; - u32 phy_flags; + u32 phy_flags = 0; int ret; /* Communicate the integrated PHY revision */ - phy_flags = priv->gphy_rev; + if (priv->internal_phy) + phy_flags = priv->gphy_rev; /* Initialize link state variables that bcmgenet_mii_setup() uses */ priv->old_link = -1; diff --git 
a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index f1b81187a20101eca9d491a61c08bf1e4e7af30c..dc7953894c3517c88ff51d78950ae64fecbd6363 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t sbmac_addr2reg(unsigned char *ptr); static irqreturn_t sbmac_intr(int irq, void *dev_instance); -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); static int sbmac_init(struct platform_device *pldev, long long base); static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); @@ -2032,7 +2032,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Return value: * nothing ********************************************************************* */ -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6ee2ed30626bfdf7c221a2c7846bc0abf90ac9ad..306b4b3206168cbbe6d63eb7f4d941d8c2d0ad69 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -266,8 +266,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, enum cxgb4_dcb_state_input input = ((pcmd->u.dcb.control.all_syncd_pkd & FW_PORT_CMD_ALL_SYNCD_F) - ? CXGB4_DCB_STATE_FW_ALLSYNCED - : CXGB4_DCB_STATE_FW_INCOMPLETE); + ? CXGB4_DCB_INPUT_FW_ALLSYNCED + : CXGB4_DCB_INPUT_FW_INCOMPLETE); if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index ccf24d3dc982478b61e79ee2e850183b58701f45..2c418c405c5088852843cd6bf1e691c3f213c3b7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -67,7 +67,7 @@ do { \ if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ cxgb4_dcb_state_fsm((__dev), \ - CXGB4_DCB_STATE_FW_ALLSYNCED); \ + CXGB4_DCB_INPUT_FW_ALLSYNCED); \ } while (0) /* States we can be in for a port's Data Center Bridging. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 23d6c44dc459cdfad5cb8e768874d60092d31eca..9ce1ad3d950ca94741e74b1ba3860a74ac646c04 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, static int alloc_uld_rxqs(struct adapter *adap, struct sge_uld_rxq_info *rxq_info, bool lro) { - struct sge *s = &adap->sge; unsigned int nq = rxq_info->nrxq + rxq_info->nciq; + int i, err, msi_idx, que_idx = 0, bmap_idx = 0; struct sge_ofld_rxq *q = rxq_info->uldrxq; unsigned short *ids = rxq_info->rspq_id; - unsigned int bmap_idx = 0; + struct sge *s = &adap->sge; unsigned int per_chan; - int i, err, msi_idx, que_idx = 0; per_chan = rxq_info->nrxq / adap->params.nports; @@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap, if (msi_idx >= 0) { bmap_idx = get_msix_idx_from_bmap(adap); + if (bmap_idx < 0) { + err = -ENOSPC; + goto freeout; + } msi_idx = adap->msix_info_ulds[bmap_idx].idx; } err = t4_sge_alloc_rxq(adap, &q->rspq, false, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index ebeeb3581b9c5b81214ef6c318d40af62764af5e..b4b435276a18d7b9f550f1a875c897ba54f3319e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3541,7 +3541,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.param[0].mnem = cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); - c.param[0].val = (__force __be32)op; + c.param[0].val = cpu_to_be32(op); return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index f7882c1fde16de4bc978e1f650207a735aecf0b6..4436a0307f32e93015d1febb79c235867e415f37 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -174,6 +174,7 @@ struct hip04_priv { dma_addr_t rx_phys[RX_DESC_NUM]; unsigned int rx_head; unsigned int rx_buf_size; + unsigned int rx_cnt_remaining; struct device_node *phy_node; struct phy_device *phy; @@ -487,7 +488,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi); struct net_device *ndev = priv->ndev; struct net_device_stats *stats = &ndev->stats; - unsigned int cnt = hip04_recv_cnt(priv); struct rx_desc *desc; struct sk_buff *skb; unsigned char *buf; @@ -500,8 +500,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) /* clean up tx descriptors */ tx_remaining = hip04_tx_reclaim(ndev, false); - - while (cnt && !last) { + priv->rx_cnt_remaining += hip04_recv_cnt(priv); + while (priv->rx_cnt_remaining && !last) { buf = priv->rx_buf[priv->rx_head]; skb = build_skb(buf, priv->rx_buf_size); if (unlikely(!skb)) { @@ -547,11 +547,13 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) hip04_set_recv_desc(priv, phys); priv->rx_head = RX_NEXT(priv->rx_head); - if (rx >= budget) + if (rx >= budget) { + --priv->rx_cnt_remaining; goto done; + } - if (--cnt == 0) - cnt = hip04_recv_cnt(priv); + if (--priv->rx_cnt_remaining == 0) + priv->rx_cnt_remaining += hip04_recv_cnt(priv); } if (!(priv->reg_inten & RCV_INT)) { @@ -636,6 +638,7 @@ static int hip04_mac_open(struct net_device *ndev) int i; priv->rx_head = 0; + 
priv->rx_cnt_remaining = 0; priv->tx_head = 0; priv->tx_tail = 0; hip04_reset_ppe(priv); @@ -950,7 +953,6 @@ static int hip04_remove(struct platform_device *pdev) hip04_free_ring(ndev, d); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index de23a0ead5d76e8a16eeda40d0aa416b5711cf13..d06efcd5f13b164f18ee8732ce661ce5f6dc83c6 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -166,11 +166,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev, { u32 time_cnt; u32 reg_value; + int ret; regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val); for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) { - regmap_read(mdio_dev->subctrl_vbase, st_reg, ®_value); + ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, ®_value); + if (ret) + return ret; + reg_value &= st_msk; if ((!!check_st) == (!!reg_value)) break; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 2a81f6d7214043757f4cd8d58f55cb7641e22618..8936f19e9325fef74797fd38ddccf4a5399209d2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -628,6 +628,7 @@ static int e1000_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) rxdr[i].count = rxdr->count; + err = 0; if (netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ err = e1000_setup_all_rx_resources(adapter); @@ -648,14 +649,13 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->rx_ring = rxdr; adapter->tx_ring = txdr; err = e1000_up(adapter); - if (err) - goto err_setup; } kfree(tx_old); kfree(rx_old); clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; + return err; + err_setup_tx: e1000_free_all_rx_resources(adapter); err_setup_rx: @@ -667,7 +667,6 @@ static int e1000_set_ringparam(struct net_device *netdev, err_alloc_tx: if (netif_running(adapter->netdev)) e1000_up(adapter); -err_setup: clear_bit(__E1000_RESETTING, &adapter->flags); return err; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index dc7d671b903c570e968e4bcdf3fb056351bcd731..625008e8cb0dfaf488e06a1221d0d7a0b8367369 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1447,6 +1447,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) else phy_reg |= 0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } } hw->phy.ops.release(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 67163ca898ba2abca4e4e66eecdf76a5f5c3984c..6374c8fc76a8d4e673a8e974e556cb439df4ee08 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -227,7 +227,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_CLK_REQ 0x200 #define HV_PM_CTRL_K1_ENABLE 0x4000 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 886378c5334fac58c29725b1e54385d80a6b0498..043b69b5843b0c481a2fe51f991c4dfcaf18558d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11360,6 +11360,7 @@ static void i40e_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { @@ -11368,6 +11369,7 @@ static void i40e_remove(struct pci_dev *pdev) pf->vsi[i] = NULL; } } + rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); @@ -11513,7 +11515,13 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index f1feceab758a5235237df685f2b8666a5a9c0ffd..41cbcb0ac2d94380caaf14a39c92e18b9706a634 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -604,7 +604,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + strncpy(pf->ptp_caps.name, i40e_driver_name, + sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 54b8ee2583f14da4e6e2f9951d672c784a8edf8d..7484ad3c955dbc5ecafb49010b0b556a353a5157 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2000,6 +2000,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } + + if (vf->pf_set_mac && + ether_addr_equal(al->list[i].addr, + vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", + vf->default_lan_addr.addr, vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } } vsi = pf->vsi[vf->lan_vsi_idx]; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7956176c2c73e5c247e95d70f748bf333ad9d576..7e35bd665630725333fd795d36f014ae4e5bba11 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1677,7 +1677,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter) if ((hw->phy.media_type == e1000_media_type_copper) && (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { swap_now = true; - } else if (!(connsw & E1000_CONNSW_SERDESD)) { + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { /* copper signal takes time to appear */ if (adapter->copper_tries < 4) { adapter->copper_tries++; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 
9eb9b68f8935ea12f106109b404cd58e3ec4930a..ae1f963b6092324ad01c864391f57a6445a2c09b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -65,9 +65,15 @@ * * The 40 bit 82580 SYSTIM overflows every * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter + * needs to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, so we aim for 8 + * minutes to be sure the actual interval is shorter than 9.16 minutes. */ -#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) #define IGB_PTP_TX_TIMEOUT (HZ * 15) #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a5428b6abdac2d6db342cc87be8e98e31f5745c4..8ad20b7852ed7794188c0d9764ffebd6e7381283 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4804,6 +4804,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; + u64 action; spin_lock(&adapter->fdir_perfect_lock); @@ -4812,12 +4813,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { + action = filter->action; + if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) + action = + (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + (action == IXGBE_FDIR_DROP_QUEUE) ? 
IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + adapter->rx_ring[action]->reg_idx); } spin_unlock(&adapter->fdir_perfect_lock); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c9f4b5412844d37691225cfb8d2b631fa4deba59..b97a070074b7af86b5242e3d12a0aa798ed6047b 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3114,7 +3114,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, skb_put(skb, len); if (dev->features & NETIF_F_RXCSUM) { - skb->csum = csum; + skb->csum = le16_to_cpu(csum); skb->ip_summed = CHECKSUM_COMPLETE; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 59dbecd19c93fc1d42f665c0137d33875857fb85..49f692907a30bfe3c5b51d1e76e1dee51fbbb986 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4946,6 +4946,13 @@ static const struct dmi_system_id msi_blacklist[] = { DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, {} }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 74d2db50586682cf6f7ce83e92fecc74c866b636..6068e7c4fc7e964cedf1c569056c4445fe5a178a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1679,6 +1679,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); break; case ETHTOOL_GRXCLSRLALL: + cmd->data = MAX_NUM_OF_FS_RULES; while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { err = mlx4_en_get_flow(dev, cmd, i); if (!err) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 79944302dd4632c08fa8919d04c218be6fcb0bd1..7d1e8ab956e644816d2791aa8ce0358c414c2151 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -470,12 +470,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev) priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; } -static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) +static int +mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + int vf) { - /* reduce the sink counter */ - return (dev->caps.max_counters - 1 - - (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) - / MLX4_MAX_PORTS; + struct mlx4_active_ports actv_ports; + int ports, counters_guaranteed; + + /* For master, only allocate according to the number of phys ports */ + if (vf == mlx4_master_func_num(dev)) + return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports; + + /* calculate real number of ports for the VF */ + actv_ports = mlx4_get_active_ports(dev, vf); + ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT; + + /* If we do not have enough counters for this VF, do not + * allocate any for it. '-1' to reduce the sink counter. 
+ */ + if ((res_alloc->res_reserved + counters_guaranteed) > + (dev->caps.max_counters - 1)) + return 0; + + return counters_guaranteed; } int mlx4_init_resource_tracker(struct mlx4_dev *dev) @@ -483,7 +502,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i, j; int t; - int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); priv->mfunc.master.res_tracker.slave_list = kzalloc(dev->num_slaves * sizeof(struct slave_list), @@ -600,16 +618,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_COUNTER: res_alloc->quota[t] = dev->caps.max_counters; - if (t == mlx4_master_func_num(dev)) - res_alloc->guaranteed[t] = - MLX4_PF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else if (t <= max_vfs_guarantee_counter) - res_alloc->guaranteed[t] = - MLX4_VF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else - res_alloc->guaranteed[t] = 0; + res_alloc->guaranteed[t] = + mlx4_calc_res_counter_guaranteed(dev, res_alloc, t); res_alloc->res_free -= res_alloc->guaranteed[t]; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d1a3a35ba87b1c6696154c697133ab8f22c1e33a..a10f042df01f041dcc4bbed86a968a37e574ec8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1757,7 +1757,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, unlock: mutex_unlock(&esw->state_lock); - return 0; + return err; } int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 585a40cc6470b231d51e64678d5f414ce26aab3e..8460c4807567c5626c752834d8b5645f5fa2028e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2191,6 +2191,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; } /* Map all priorities to traffic class 0. */ diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 20cb85bc0c5f8660a259dfb8dfd7213df71373a1..6135d90f368fa0bddeb7dac29305d7aadc3b698a 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1156,7 +1156,7 @@ ks8695_timeout(struct net_device *ndev) * sk_buff and adds it to the TX ring. It then kicks the TX DMA * engine to ensure transmission begins. */ -static int +static netdev_tx_t ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ks8695_priv *ksp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 2fc5cd56c0a846d30e86ba7b6efa0bbfaedf264f..8dc1f0277117d9acf3618163ccdd9a4b87295fd4 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1020,9 +1020,9 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) * spin_lock_irqsave is required because tx and rx should be mutual exclusive. * So while tx is in-progress, prevent IRQ interrupt from happenning. 
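Stepping back to the mlx4 resource_tracker hunk above (the ks8851_mll hunk resumes below): the single global per-VF guarantee is replaced by a per-function calculation based on the ports a function can actually use. A minimal stand-alone sketch of that logic, with hypothetical names and simplified types rather than the driver's own structures, might look like this:

    #include <linux/bitmap.h>

    /*
     * Guarantee counters in proportion to the ports a function can use, and
     * guarantee none at all if the pool (minus the sink counter) cannot
     * cover the request.  All names here are illustrative.
     */
    static int example_guaranteed_counters(const unsigned long *active_ports,
                                           int num_ports, int per_port,
                                           int already_reserved,
                                           int max_counters)
    {
            int ports = bitmap_weight(active_ports, num_ports);
            int want = ports * per_port;

            /* '-1' keeps the sink counter out of the distributable pool */
            if (already_reserved + want > max_counters - 1)
                    return 0;

            return want;
    }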
*/ -static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - int retv = NETDEV_TX_OK; + netdev_tx_t retv = NETDEV_TX_OK; struct ks_net *ks = netdev_priv(netdev); disable_irq(netdev->irq); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8e13ec84c53812e92bbabd88a12a25e12c353a75..9fcaf191063354e6ad315a27926afad217b58015 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1374,13 +1374,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->dma_buff_base_p = dma_handle; netdev_dbg(ndev, "IO address space :%pR\n", res); - netdev_dbg(ndev, "IO address size :%d\n", resource_size(res)); + netdev_dbg(ndev, "IO address size :%zd\n", + (size_t)resource_size(res)); netdev_dbg(ndev, "IO address (mapped) :0x%p\n", pldat->net_base); netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); - netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); - netdev_dbg(ndev, "DMA buffer P address :0x%08x\n", - pldat->dma_buff_base_p); + netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size); + netdev_dbg(ndev, "DMA buffer P address :%pad\n", + &pldat->dma_buff_base_p); netdev_dbg(ndev, "DMA buffer V address :0x%p\n", pldat->dma_buff_base_v); @@ -1427,8 +1428,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) if (ret) goto err_out_unregister_netdev; - netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", - res->start, ndev->irq); + netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n", + (unsigned long)res->start, ndev->irq); phydev = ndev->phydev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index a769196628d91a5d2232d7530f558b3517310179..708117fc6f7336aef47a440de4510c3d9b19e132 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -958,7 +958,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + goto err4; } } @@ -966,6 +966,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, return 0; +err4: + qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 85f46dbecd5b57b5cdb537900a51dbd8cfa4dcc1..9b1920b58594ae5000b7b5dacd7113153176fe4e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2619,8 +2619,16 @@ enum qede_remove_mode { static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); - struct qede_dev *edev = netdev_priv(ndev); - struct qed_dev *cdev = edev->cdev; + struct qede_dev *edev; + struct qed_dev *cdev; + + if (!ndev) { + dev_info(&pdev->dev, "Device has already been removed\n"); + return; + } + + edev = netdev_priv(ndev); + cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 355c5fb802cd71e10474f8dc2c4334cc5e3471a0..c653b97d84d5bd911cf20dc44f5ff635c33d53da 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2783,6 +2783,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); + dev_kfree_skb_irq(skb); 
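The qla3xxx hunk above (which resumes just below) adds a dev_kfree_skb_irq() on the PCI-mapping failure path, closing an skb leak. A self-contained sketch of the general pattern, using generic DMA helpers and hypothetical names rather than the driver's ring structures:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Allocate and map one receive buffer; on a mapping error the skb must
     * be freed before unwinding, otherwise it leaks (the bug fixed above).
     */
    static int example_map_rx_buffer(struct net_device *ndev,
                                     struct device *dev, unsigned int len,
                                     struct sk_buff **out,
                                     dma_addr_t *mapping)
    {
            struct sk_buff *skb = netdev_alloc_skb(ndev, len);

            if (!skb)
                    return -ENOMEM;

            *mapping = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *mapping)) {
                    dev_kfree_skb_any(skb); /* free before returning the error */
                    return -ENOMEM;
            }

            *out = skb;
            return 0;
    }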
ql_free_large_buffers(qdev); return -ENOMEM; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 4b76c69fe86d2aaa8609456277b767f1993bffe4..834208e55f7b8a7d1e2ac535096e64b263505178 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) - return 0; + return 1; switch (capid) { case DCB_CAP_ATTR_PG: diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index c2bd5378ffdaf005d039785bccfe59f52b2d6c8d..3527962f0bdad75e716c5d9cdae70927ac52ec19 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev) printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; - goto err_out_free_page; + goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; -err_out_free_page: - free_page((unsigned long) sp->srings); +err_out_free_attrs: + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); err_out_free_dev: free_netdev(dev); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 77a5364f7a109927aa905f6ec8403cfa3d980136..04cbff7f1b23d520e3d3b6c9bb73d61c92a43cd5 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1320,7 +1320,8 @@ void efx_ptp_remove(struct efx_nic *efx) (void)efx_ptp_disable(efx); cancel_work_sync(&efx->ptp_data->work); - cancel_work_sync(&efx->ptp_data->pps_work); + if (efx->ptp_data->pps_workwq) + cancel_work_sync(&efx->ptp_data->pps_work); skb_queue_purge(&efx->ptp_data->rxq); skb_queue_purge(&efx->ptp_data->txq); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index cb49c9654f0a7c2d8d0bb922bc4b38c7e6180419..323b3ac16bc0e3ae283621fb97a3006e3217546a 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -514,7 +514,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) * now, or set the card to generates an interrupt when ready * for the packet. */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); unsigned int free; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 73212590d04a362e055039d90b11e1329592d21a..b0c72167badeca68e5a8fefa046de571fdf9ccfe 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -637,7 +637,8 @@ done: if (!THROTTLE_TX_PKTS) * now, or set the card to generates an interrupt when ready * for the packet. 
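Before the smc911x hunk resumes below, note the sgiseeq error-path change above: the label is renamed and the descriptor rings are released with dma_free_attrs(), matching the attrs-based allocation they presumably came from. A reduced sketch of that unwind rule, with an illustrative size and a trivial stand-in for the registration step:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define EXAMPLE_RING_BYTES      4096    /* illustrative size */

    static int example_register(void *ring)
    {
            return 0;       /* stands in for register_netdev() etc. */
    }

    static int example_probe(struct device *dev)
    {
            dma_addr_t ring_dma;
            void *ring;
            int err;

            ring = dma_alloc_attrs(dev, EXAMPLE_RING_BYTES, &ring_dma,
                                   GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
            if (!ring)
                    return -ENOMEM;

            err = example_register(ring);
            if (err)
                    goto err_free_attrs;    /* label named after what it frees */

            return 0;

    err_free_attrs:
            dma_free_attrs(dev, EXAMPLE_RING_BYTES, ring, ring_dma,
                           DMA_ATTR_NON_CONSISTENT);
            return err;
    }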
*/ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 734caa7a557bed4c7fc47292f5992665dae1a6b1..4143659615e1095d46353088a43dd68f8d838fb7 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1776,7 +1776,8 @@ static int smsc911x_stop(struct net_device *dev) } /* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int freespace; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d7cb205fe7e261d3f34482db05710efecd440111..892b06852e150384a2140fb5b03b8cd749054e03 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -590,6 +590,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); + __dev_mc_unsync(ndev, NULL); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 272f2b1cb7add6bf95da8e9c25794aaf0404d6f1..34f84379553109ef9fed37204c0444e8b2612db1 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card, * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, <0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure */ -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gelic_card *card = netdev_card(netdev); struct gelic_descr *descr; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 8505196be9f52bd7a982ec9dcb91f850379cfd07..d123644bd720b0604454c68328ff81d94da74c5c 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card); void gelic_card_down(struct gelic_card *card); int gelic_net_open(struct net_device *netdev); int gelic_net_stop(struct net_device *netdev); -int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); void gelic_net_set_multi(struct net_device *netdev); void gelic_net_tx_timeout(struct net_device *netdev); int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 36a6e8b54d9415d65bf1b6d2804a53949372eaa6..1085987946212fdc4406c6dbb0fd7b0bec37cfe5 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -880,9 +880,9 @@ spider_net_kick_tx_dma(struct spider_net_card *card) * @skb: packet to send out * @netdev: interface device structure * - * returns 0 on success, !0 on failure + * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on 
failure */ -static int +static netdev_tx_t spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) { int cnt; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 47ebac456ae57e1ef47a84b3dd7138a974bd0e90..9b84ee736fdc11c31ce9d29772c5f7c2390d40c1 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_ /* Index to functions, as function prototypes. */ static int tc35815_open(struct net_device *dev); -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t tc35815_send_packet(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); static int tc35815_rx(struct net_device *dev, int limit); static int tc35815_poll(struct napi_struct *napi, int budget); @@ -1249,7 +1250,8 @@ tc35815_open(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. */ -static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); struct TxFD *txfd; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index a9bd665fd1225be2a42f389c0ccb5d600124c9ab..545f60877bb7df271a3012b1d2237ed86383bc17 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -673,7 +673,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) return 0; } -static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 5f21ddff9e0f996f5c4b8b92234ee940c2214a45..46fcf3ec2caf7f604335c5d697bc6a6a9f494fee 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -655,7 +655,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. */ -static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t +axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index aa02a03a6d8db22996941cd53d4d12968a4e0ee2..034b36442ee7531d9f2b9b7113059aac2e596f72 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1005,9 +1005,10 @@ static int xemaclite_close(struct net_device *dev) * deferred and the Tx queue is stopped so that the deferred socket buffer can * be transmitted when the Emaclite device is free to transmit data. * - * Return: 0, always. + * Return: NETDEV_TX_OK, always. 
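Several hunks in this stretch (ks8695, ks8851_mll, smc911x, smc91x, smsc911x, the Toshiba and Xilinx drivers, and the xilinx_emaclite hunk that resumes below) make the same change: ndo_start_xmit handlers now return netdev_tx_t instead of int. A minimal sketch of the prototype they converge on, with a hypothetical ring-space check standing in for the real hardware test:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static bool example_hw_has_room(struct net_device *dev);   /* hypothetical */

    /* netdev_tx_t restricts the return value to NETDEV_TX_OK / NETDEV_TX_BUSY,
     * so a handler can no longer leak an errno into the qdisc layer by accident.
     */
    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            if (!example_hw_has_room(dev)) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* stack will requeue the skb */
            }

            /* ... post skb to hardware ... */
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;

            return NETDEV_TX_OK;
    }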
*/ -static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +static netdev_tx_t +xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *new_skb; @@ -1028,7 +1029,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) /* Take the time stamp now, since we can't do this in an ISR. */ skb_tx_timestamp(new_skb); spin_unlock_irqrestore(&lp->reset_lock, flags); - return 0; + return NETDEV_TX_OK; } spin_unlock_irqrestore(&lp->reset_lock, flags); @@ -1037,7 +1038,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) dev->stats.tx_bytes += len; dev_consume_skb_any(new_skb); - return 0; + return NETDEV_TX_OK; } /** diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 7ea8ead4fd1c70a431ef3f8c96f3f3dccf23d18d..bbc983b04561ffab6f29cb0e0fc0a52560153256 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1187,8 +1187,17 @@ static int fjes_probe(struct platform_device *plat_dev) adapter->open_guard = false; adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->txrx_wq)) { + err = -ENOMEM; + goto err_free_netdev; + } + adapter->control_wq = alloc_workqueue(DRV_NAME "/control", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->control_wq)) { + err = -ENOMEM; + goto err_free_txrx_wq; + } INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); INIT_WORK(&adapter->raise_intr_rxdata_task, @@ -1205,7 +1214,7 @@ static int fjes_probe(struct platform_device *plat_dev) hw->hw_res.irq = platform_get_irq(plat_dev, 0); err = fjes_hw_init(&adapter->hw); if (err) - goto err_free_netdev; + goto err_free_control_wq; /* setup MAC address (02:00:00:00:00:[epid])*/ netdev->dev_addr[0] = 2; @@ -1225,6 +1234,10 @@ static int fjes_probe(struct platform_device *plat_dev) err_hw_exit: fjes_hw_exit(&adapter->hw); +err_free_control_wq: + destroy_workqueue(adapter->control_wq); +err_free_txrx_wq: + destroy_workqueue(adapter->txrx_wq); err_free_netdev: free_netdev(netdev); err_out: diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index f186e0460cde311092f2ae8298e83060317bef5f..12df6cfb423ace7a1bb81a145c11f98e8f60620a 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -838,10 +838,11 @@ static void atusb_disconnect(struct usb_interface *interface) ieee802154_unregister_hw(atusb->hw); + usb_put_dev(atusb->usb_dev); + ieee802154_free_hw(atusb->hw); usb_set_intfdata(interface, NULL); - usb_put_dev(atusb->usb_dev); pr_debug("atusb_disconnect done\n"); } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d91f020a8491d49d965f835451f1e4fa58db9de5..a48ed0873cc72383da8b2e5a42b54143b6a10222 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1240,6 +1240,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); + skb_orphan(skb); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, skb->len); @@ -2797,9 +2798,6 @@ static int macsec_dev_open(struct net_device *dev) struct net_device *real_dev = macsec->real_dev; int err; - if (!(real_dev->flags & IFF_UP)) - return -ENETDOWN; - err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) return err; @@ -3274,6 +3272,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) goto del_dev; + netif_stacked_transfer_operstate(real_dev, dev); + 
linkwatch_fire_event(dev); + macsec_generation++; return 0; @@ -3445,6 +3446,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, return NOTIFY_DONE; switch (event) { + case NETDEV_DOWN: + case NETDEV_UP: + case NETDEV_CHANGE: { + struct macsec_dev *m, *n; + struct macsec_rxh_data *rxd; + + rxd = macsec_data_rtnl(real_dev); + list_for_each_entry_safe(m, n, &rxd->secys, secys) { + struct net_device *dev = m->secy.netdev; + + netif_stacked_transfer_operstate(real_dev, dev); + } + break; + } case NETDEV_UNREGISTER: { struct macsec_dev *m, *n; struct macsec_rxh_data *rxd; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index a9acf7156855546080e850bc07c7a5380ccc1980..03009f1becddc954c450221ff830432fa2c8f463 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(unsigned long data) struct ntb_netdev *dev = netdev_priv(ndev); if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { - mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time)); + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 2a1b490bc58718ee20e647ec341aa54707bf174a..718cd3c59e92caea447228cd03633c8d8e0d6e71 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable) { + u16 lb_dis = BIT(1); + if (disable) - ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1); + ns_exp_write(phydev, 0x1c0, + ns_exp_read(phydev, 0x1c0) | lb_dis); else ns_exp_write(phydev, 0x1c0, - ns_exp_read(phydev, 0x1c0) & 0xfffe); + ns_exp_read(phydev, 0x1c0) & ~lb_dis); pr_debug("10BASE-T HDX loopback %s\n", - (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); + (ns_exp_read(phydev, 0x1c0) & lb_dis) ? 
"off" : "on"); } static int ns_config_init(struct phy_device *phydev) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index f2729cbdb83e808dcfaae7a7a5c58fe7cc8195e8..d209d5fe9c8e6e37a258e9ceea975000d4d6dc19 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1432,6 +1432,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) netif_wake_queue(ppp->dev); else netif_stop_queue(ppp->dev); + } else { + kfree_skb(skb); } ppp_xmit_unlock(ppp); } diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 9ed6d1c1ee45f15b2686b7c4102045b3acead55c..b2317b3a542a02a898a88beb5064320b4d9278a7 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -860,6 +860,7 @@ static int slip_open(struct tty_struct *tty) sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); + free_netdev(sl->dev); err_exit: rtnl_unlock(); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e33a45e5ec0db4039d9d938dcc2f87bf6b6efe18..134c59c9c20dccfcf4d5e852fac0cdc61a7a439a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -627,7 +627,8 @@ static void tun_detach_all(struct net_device *dev) module_put(THIS_MODULE); } -static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter) +static int tun_attach(struct tun_struct *tun, struct file *file, + bool skip_filter, bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; @@ -669,7 +670,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; - rcu_assign_pointer(tfile->tun, tun); + if (publish_tun) + rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; @@ -1755,7 +1757,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (err < 0) return err; - err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER); + err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true); if (err < 0) return err; @@ -1843,13 +1845,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) NETIF_F_HW_VLAN_STAG_TX); INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false); + err = tun_attach(tun, file, false, false); if (err < 0) goto err_free_flow; err = register_netdevice(tun->dev); if (err < 0) goto err_detach; + /* free_netdev() won't check refcnt, to aovid race + * with dev_put() we need publish tun after registration. 
+ */ + rcu_assign_pointer(tfile->tun, tun); } netif_carrier_on(tun->dev); @@ -1993,7 +1999,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; - ret = tun_attach(tun, file, false); + ret = tun_attach(tun, file, false, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 49a3bc107d05490fc061c94815b746170942ba8b..2c50497cc4edcbd44f96f2d1efcb00eca505cff5 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -215,7 +215,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); - if (ret < 0) { + if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); goto free; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 99424c87b4640db94b68c497e55dad18cb48a65c..8f03cc52ddda25b5c6c65c9a800916d0725ca118 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -212,9 +212,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) goto bad_desc; } skip: - if ( rndis && - header.usb_cdc_acm_descriptor && - header.usb_cdc_acm_descriptor->bmCapabilities) { + /* Communcation class functions with bmCapabilities are not + * RNDIS. But some Wireless class RNDIS functions use + * bmCapabilities for their own purpose. The failsafe is + * therefore applied only to Communication class RNDIS + * functions. The rndis test is redundant, but a cheap + * optimization. + */ + if (rndis && is_rndis(&intf->cur_altsetting->desc) && + header.usb_cdc_acm_descriptor && + header.usb_cdc_acm_descriptor->bmCapabilities) { dev_dbg(&intf->dev, "ACM capabilities %02x, not really RNDIS?\n", header.usb_cdc_acm_descriptor->bmCapabilities); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 7b158674ceeda9f736d6fb3e191663c496b390c6..be4e56826daf6dd9d04926a577540fe9f6e19168 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -576,8 +576,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) /* read current mtu value from device */ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); - if (err < 0) { + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); + if (err != sizeof(max_datagram_size)) { dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); goto out; } @@ -588,7 +588,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) max_datagram_size = cpu_to_le16(ctx->max_datagram_size); err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); if (err < 0) dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); @@ -679,8 +679,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf) u8 ep; for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) { - e = intf->cur_altsetting->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case 
USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(&e->desc)) { diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 66ae647b712eb9ab1e7d2e727d5bafad1e900d02..27fc699d8be5b44f2f5e82d8c619603cdc45baa7 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2635,14 +2635,18 @@ static struct hso_device *hso_create_bulk_serial_device( */ if (serial->tiocmget) { tiocmget = serial->tiocmget; + tiocmget->endp = hso_get_ep(interface, + USB_ENDPOINT_XFER_INT, + USB_DIR_IN); + if (!tiocmget->endp) { + dev_err(&interface->dev, "Failed to find INT IN ep\n"); + goto exit; + } + tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); if (tiocmget->urb) { mutex_init(&tiocmget->mutex); init_waitqueue_head(&tiocmget->waitq); - tiocmget->endp = hso_get_ep( - interface, - USB_ENDPOINT_XFER_INT, - USB_DIR_IN); } else hso_free_tiomget(serial); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index b9ba8bb3a8ef1106bde8ebad3695a7574abf73b5..32a24932715a07b247790a46332c6658429ca96c 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2627,6 +2627,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) int i; ret = lan78xx_get_endpoints(dev, intf); + if (ret) { + netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", + ret); + return ret; + } dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 05953e14a064ec7cdf3c10f30d986bed1a847be6..de7b431fdd6b5ce182e8dca6dc4f677fe9bab38b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -940,6 +940,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -950,6 +951,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 02e29562d254e5981cc9b960693c9cce8e0259f7..15dc70c11857978b9b11e38b7a993560522b5dbe 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -689,8 +689,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, 500); + if (ret < 0) + memset(data, 0xff, size); + else + memcpy(data, tmp, size); - memcpy(data, tmp, size); kfree(tmp); return ret; diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 
004c955c1fd1b8f5d43586b389a13a88953e4378..da0ae16f5c74cf8e88f5465fd072c8779f95a951 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net) static int sr_mdio_read(struct net_device *net, int phy_id, int loc) { struct usbnet *dev = netdev_priv(net); - __le16 res; + __le16 res = 0; mutex_lock(&dev->phy_mutex); sr_set_sw_mii(dev); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index a5acbcb3c044556c641b9c3858ceec69179a1be6..0b5fd1499ac06014fc05bccd2765cb79deb9687c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -114,6 +114,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) int intr = 0; e = alt->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: if (!usb_endpoint_dir_in(&e->desc)) @@ -349,6 +354,8 @@ void usbnet_update_max_qlen(struct usbnet *dev) { enum usb_device_speed speed = dev->udev->speed; + if (!dev->rx_urb_size || !dev->hard_mtu) + goto insanity; switch (speed) { case USB_SPEED_HIGH: dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; @@ -365,6 +372,7 @@ void usbnet_update_max_qlen(struct usbnet *dev) dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; break; default: +insanity: dev->rx_qlen = dev->tx_qlen = 4; } } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b6ee0c1690d83116693eabe91bd6bbddc6bdcd53..340bd98b8dbd74a77df759b84c133c02867bcc2e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2049,8 +2049,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, label = info->key.label; udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); - if (info->options_len) + if (info->options_len) { + if (info->options_len < sizeof(*md)) + goto drop; md = ip_tunnel_info_opts(info); + } } else { md->gbp = skb->mark; } diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index da770af830369ffa66cfe7511786e05797d559d0..125b5c31b2b0a1d58ff19836171514392769a561 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -658,10 +658,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar) ath10k_ahb_irq_disable(ar); synchronize_irq(ar_ahb->irq); - ath10k_pci_flush(ar); - napi_synchronize(&ar->napi); napi_disable(&ar->napi); + + ath10k_pci_flush(ar); } static int ath10k_ahb_hif_power_up(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 90c0c4a7175db5a940d1ee119ed37bff4ac70001..414153cd578457c528dcfb6ced4a9a56a4642dd6 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -811,6 +811,7 @@ struct ath10k { struct completion install_key_done; + int last_wmi_vdev_start_status; struct completion vdev_setup_done; struct workqueue_struct *workqueue; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1588fe8110d00c0f2294bd44acfc5a0e9ba3e191..2294ba311c47ab998fd1a0604c225b1f9c1a5074 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -947,7 +947,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) if (time_left == 0) return -ETIMEDOUT; - return 0; + return ar->last_wmi_vdev_start_status; } static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) diff --git 
a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 25b8d501d437e7f9efc4a0caaf8c88af614047e1..d84a362a084ac2a85ce49e46eaf766313fc9b2e1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1039,10 +1039,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; u32 *buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; - u32 ce_data; /* Host buffer address in CE space */ dma_addr_t ce_data_base = 0; int i; @@ -1056,9 +1055,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, - orig_nbytes, + alloc_nbytes, &ce_data_base, GFP_ATOMIC); if (!data_buf) { @@ -1066,9 +1066,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, goto done; } - /* Copy caller's data to allocated DMA buf */ - memcpy(data_buf, data, orig_nbytes); - /* * The address supplied by the caller is in the * Target CPU virtual address space. @@ -1081,12 +1078,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, */ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); - remaining_bytes = orig_nbytes; - ce_data = ce_data_base; + remaining_bytes = nbytes; while (remaining_bytes) { /* FIXME: check cast */ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); + /* Copy caller's data to allocated DMA buf */ + memcpy(data_buf, data, nbytes); + /* Set up to receive directly into Target(!) address */ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) @@ -1096,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. 
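The ath10k_pci_diag_write_mem() hunk above (continuing below) stops allocating a coherent buffer the size of the whole transfer; it reuses a single DIAG_TRANSFER_LIMIT-sized bounce buffer and copies the caller's data into it one chunk at a time. A simplified stand-alone model of that loop, with hypothetical names and the copy-engine hand-off elided:

    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define EXAMPLE_CHUNK   2048    /* stands in for DIAG_TRANSFER_LIMIT */

    static int example_chunked_write(struct device *dev, const u8 *data,
                                     size_t nbytes)
    {
            dma_addr_t buf_dma;
            void *buf;

            buf = dma_alloc_coherent(dev, EXAMPLE_CHUNK, &buf_dma, GFP_ATOMIC);
            if (!buf)
                    return -ENOMEM;

            while (nbytes) {
                    size_t len = min_t(size_t, nbytes, EXAMPLE_CHUNK);

                    memcpy(buf, data, len); /* stage one chunk in the bounce buffer */
                    /* ... post buf_dma/len to the copy engine and wait ... */

                    data += len;
                    nbytes -= len;
            }

            dma_free_coherent(dev, EXAMPLE_CHUNK, buf, buf_dma);
            return 0;
    }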
*/ - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, + ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done; @@ -1137,12 +1136,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, remaining_bytes -= nbytes; address += nbytes; - ce_data += nbytes; + data += nbytes; } done: if (data_buf) { - dma_free_coherent(ar->dev, orig_nbytes, data_buf, + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); } @@ -1781,9 +1780,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); - ath10k_pci_flush(ar); napi_synchronize(&ar->napi); napi_disable(&ar->napi); + ath10k_pci_flush(ar); spin_lock_irqsave(&ar_pci->ps_lock, flags); WARN_ON(ar_pci->ps_wake_refcount > 0); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index bbfe7be214e1251a0ccf740c0fa944a48104977a..af3bc06b4aeda69ba67f997000752a9ad8915c64 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2384,7 +2384,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) status->freq, status->band, status->signal, status->rate_idx); - ieee80211_rx(ar->hw, skb); + ieee80211_rx_ni(ar->hw, skb); + return 0; } @@ -3102,18 +3103,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb) { struct wmi_vdev_start_ev_arg arg = {}; int ret; + u32 status; ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + ar->last_wmi_vdev_start_status = 0; + ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg); if (ret) { ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret); - return; + ar->last_wmi_vdev_start_status = ret; + goto out; } - if (WARN_ON(__le32_to_cpu(arg.status))) - return; + status = __le32_to_cpu(arg.status); + if (WARN_ON_ONCE(status)) { + ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n", + status, (status == WMI_VDEV_START_CHAN_INVALID) ? + "chan-invalid" : "unknown"); + /* Setup is done one way or another though, so we should still + * do the completion, so don't return here. + */ + ar->last_wmi_vdev_start_status = -EINVAL; + } +out: complete(&ar->vdev_setup_done); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 9b8562ff669876f6525e1730ec3e039808c769a6..cce028ea9b57dcd26c4ddf52091fde378d34da94 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6248,11 +6248,17 @@ struct wmi_ch_info_ev_arg { __le32 rx_frame_count; }; +/* From 10.4 firmware, not sure all have the same values. 
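The ath10k WMI hunks above record the firmware's vdev-start status in ar->last_wmi_vdev_start_status before completing vdev_setup_done, and ath10k_vdev_setup_sync() now returns that value instead of assuming success (the wmi.h enum comment resumes below). A reduced sketch of the pattern, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct example_dev {
            struct completion setup_done;
            int last_status;        /* written by the handler, read by the waiter */
    };

    static void example_handle_start_response(struct example_dev *dev,
                                               u32 fw_status)
    {
            dev->last_status = fw_status ? -EINVAL : 0;
            complete(&dev->setup_done);     /* always complete, even on error */
    }

    static int example_setup_sync(struct example_dev *dev)
    {
            unsigned long time_left;

            time_left = wait_for_completion_timeout(&dev->setup_done, 5 * HZ);
            if (!time_left)
                    return -ETIMEDOUT;

            return dev->last_status;
    }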
*/ +enum wmi_vdev_start_status { + WMI_VDEV_START_OK = 0, + WMI_VDEV_START_CHAN_INVALID, +}; + struct wmi_vdev_start_ev_arg { __le32 vdev_id; __le32 req_id; __le32 resp_type; /* %WMI_VDEV_RESP_ */ - __le32 status; + __le32 status; /* See wmi_vdev_start_status enum above */ }; struct wmi_peer_kick_ev_arg { diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 9da3594fd010d08a64994c848443076f4745a7a8..fc22c5f479276a089aabf931be5b9ac33ed62034 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) struct ath6kl_urb_context *urb_context = NULL; unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return NULL; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = @@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, { unsigned long flags; + /* bail if this pipe is not initialized */ + if (!pipe->ar_usb) + return; + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 08607d7fdb56adc865b4fc70564abca86a49488d..7eff6f8023d8277b1fab876f4af1b7f50ce8d064 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4115,7 +4115,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) { - u32 data, ko, kg; + u32 data = 0, ko, kg; if (!AR_SREV_9462_20_OR_LATER(ah)) return; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index eedf86b67cf517f4b9a400333cbc000ad28441d7..807fbe31e93039bbaca6465e7fd987eb18f55eff 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -411,7 +411,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs, ath_dbg(common, SPECTRAL_SCAN, "Calculated new upper max 0x%X at %i\n", - tmp_mag, i); + tmp_mag, fft_sample_40.upper_max_index); } else for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) { if (fft_sample_40.data[i] == (upper_mag >> max_exp)) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index b868f02ced893d8891839a74171ea32901c1a34a..abc997427bae5f64ff40a40cddae4ab932af4132 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1250,7 +1250,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, struct ath_node *an = &avp->mcast_node; mutex_lock(&sc->mutex); - if (IS_ENABLED(CONFIG_ATH9K_TX99)) { if (sc->cur_chan->nvifs >= 1) { mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 8e9480cc33e15843b249f3d171dd91aa8875cb99..096902e0fdf5c4c88818168fb98cf1c98a913684 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) struct sk_buff *skb; struct ath_vif *avp; - if (!sc->tx99_vif) - return NULL; - - avp = (struct ath_vif *)sc->tx99_vif->drv_priv; - skb = alloc_skb(len, GFP_KERNEL); if (!skb) return NULL; @@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc) memcpy(hdr->addr2, hw->wiphy->perm_addr, 
ETH_ALEN); memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); - hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + if (sc->tx99_vif) { + avp = (struct ath_vif *) sc->tx99_vif->drv_priv; + hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); + } tx_info = IEEE80211_SKB_CB(skb); memset(tx_info, 0, sizeof(*tx_info)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index f78d91b6928711a2128317d60d7ea054eb09abd7..aac9c97d225573cae436ffe82f22d80b30f75331 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -74,7 +74,7 @@ #define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) #define P2P_INVALID_CHANNEL -1 #define P2P_CHANNEL_SYNC_RETRY 5 -#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500) +#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450) #define P2P_DEFAULT_SLEEP_TIME_VSDB 200 /* WiFi P2P Public Action Frame OUI Subtypes */ @@ -1139,7 +1139,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) { struct afx_hdl *afx_hdl = &p2p->afx_hdl; struct brcmf_cfg80211_vif *pri_vif; - unsigned long duration; s32 retry; brcmf_dbg(TRACE, "Enter\n"); @@ -1155,7 +1154,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) * pending action frame tx is cancelled. */ retry = 0; - duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT); while ((retry < P2P_CHANNEL_SYNC_RETRY) && (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) { afx_hdl->is_listen = false; @@ -1163,7 +1161,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) retry); /* search peer on peer's listen channel */ schedule_work(&afx_hdl->afx_work); - wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))) @@ -1176,7 +1175,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) afx_hdl->is_listen = true; schedule_work(&afx_hdl->afx_work); wait_for_completion_timeout(&afx_hdl->act_frm_scan, - duration); + P2P_AF_FRM_SCAN_MAX_WAIT); } if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, @@ -1463,10 +1462,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, return 0; if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) { - if (e->status == BRCMF_E_STATUS_SUCCESS) + if (e->status == BRCMF_E_STATUS_SUCCESS) { set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); - else { + if (!p2p->wait_for_offchan_complete) + complete(&p2p->send_af_done); + } else { set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); /* If there is no ack, we don't need to wait for * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event @@ -1517,6 +1518,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, p2p->af_sent_channel = le32_to_cpu(af_params->channel); p2p->af_tx_sent_jiffies = jiffies; + if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) && + p2p->af_sent_channel == + ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq)) + p2p->wait_for_offchan_complete = false; + else + p2p->wait_for_offchan_complete = true; + + brcmf_dbg(TRACE, "Waiting for %s tx completion event\n", + (p2p->wait_for_offchan_complete) ? 
+ "off-channel" : "on-channel"); + timeout = wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index 8ce9447533ef8fa519caab0f985949487346464d..fbee51148904619817025bceed5d6c16d5bd6b2b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -124,6 +124,7 @@ struct afx_hdl { * @gon_req_action: about to send go negotiation requets frame. * @block_gon_req_tx: drop tx go negotiation requets frame. * @p2pdev_dynamically: is p2p device if created by module param or supplicant. + * @wait_for_offchan_complete: wait for off-channel tx completion event. */ struct brcmf_p2p_info { struct brcmf_cfg80211_info *cfg; @@ -144,6 +145,7 @@ struct brcmf_p2p_info { bool gon_req_action; bool block_gon_req_tx; bool p2pdev_dynamically; + bool wait_for_offchan_complete; }; s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 7c2a9a9bc372c19502d3bf1d7091ccbd4654b061..b820e80d4b4c2a102f1aa5d15b447d4857dda3d3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } spin_lock_bh(&wl->lock); + wl->wlc->vif = vif; wl->mute_tx = false; brcms_c_mute(wl->wlc, false); if (vif->type == NL80211_IFTYPE_STATION) @@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static void brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct brcms_info *wl = hw->priv; + + spin_lock_bh(&wl->lock); + wl->wlc->vif = NULL; + spin_unlock_bh(&wl->lock); } static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) @@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, status = brcms_c_aggregatable(wl->wlc, tid); spin_unlock_bh(&wl->lock); if (!status) { - brcms_err(wl->wlc->hw->d11core, - "START: tid %d is not agg\'able\n", tid); + brcms_dbg_ht(wl->wlc->hw->d11core, + "START: tid %d is not agg\'able\n", tid); return -EINVAL; } ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); @@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw, spin_unlock_bh(&wl->lock); } +static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct brcms_info *wl = hw->priv; + struct sk_buff *beacon = NULL; + u16 tim_offset = 0; + + spin_lock_bh(&wl->lock); + if (wl->wlc->vif) + beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif, + &tim_offset, NULL); + if (beacon) + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, + wl->wlc->vif->bss_conf.dtim_period); + spin_unlock_bh(&wl->lock); + + return 0; +} + static const struct ieee80211_ops brcms_ops = { .tx = brcms_ops_tx, .start = brcms_ops_start, @@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = { .flush = brcms_ops_flush, .get_tsf = brcms_ops_get_tsf, .set_tsf = brcms_ops_set_tsf, + .set_tim = brcms_ops_beacon_set_tim, }; void brcms_dpc(unsigned long data) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h index c4d135cff04ad2f7883c783fb96244bbbabfa370..9f76b880814e8201744a83256f8a89af8e4f0ba7 
100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h @@ -563,6 +563,7 @@ struct brcms_c_info { struct wiphy *wiphy; struct scb pri_scb; + struct ieee80211_vif *vif; struct sk_buff *beacon; u16 beacon_tim_offset; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 69b826d229c5b7462e157de2b93ec8bbd4de1f2a..04939e576ee022ceec788224da8a177dd89a890d 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5472,7 +5472,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { we have to add a spin lock... */ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { - ptr += sprintf(ptr, "%pM %*s rssi = %d", + ptr += sprintf(ptr, "%pM %.*s rssi = %d", BSSList_rid.bssid, (int)BSSList_rid.ssidLen, BSSList_rid.ssid, diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 22b7fec2e3cd7adf652cfb6defe6d2e1dcdbf9fc..86a61496e50f48ed21ea6d9818aefe077099bf7c 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2084,6 +2084,26 @@ static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv) return 0; } +static int cnss_mhi_link_status(struct mhi_controller *mhi_ctrl, void *priv) +{ + struct cnss_pci_data *pci_priv = priv; + u16 device_id; + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL\n"); + return -EINVAL; + } + + pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id); + if (device_id != pci_priv->device_id) { + cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n", + device_id, pci_priv->device_id); + return -EIO; + } + + return 0; +} + static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv) { int ret = 0; @@ -2161,6 +2181,87 @@ static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv) kfree(mhi_ctrl->irq); } +static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state mhi_state) +{ + switch (mhi_state) { + case CNSS_MHI_INIT: + if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_DEINIT: + case CNSS_MHI_POWER_ON: + if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) && + !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_FORCE_POWER_OFF: + if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_POWER_OFF: + case CNSS_MHI_SUSPEND: + if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) && + !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_RESUME: + if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_TRIGGER_RDDM: + if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) && + !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_RDDM_DONE: + return 0; + default: + cnss_pr_err("Unhandled MHI state: %s(%d)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state); + } + + cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state, + 
pci_priv->mhi_state); + + return -EINVAL; +} + +static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state mhi_state) +{ + switch (mhi_state) { + case CNSS_MHI_INIT: + set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); + break; + case CNSS_MHI_DEINIT: + clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); + break; + case CNSS_MHI_POWER_ON: + set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); + break; + case CNSS_MHI_POWER_OFF: + case CNSS_MHI_FORCE_POWER_OFF: + clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); + clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state); + break; + case CNSS_MHI_SUSPEND: + set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); + break; + case CNSS_MHI_RESUME: + clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); + break; + case CNSS_MHI_TRIGGER_RDDM: + break; + case CNSS_MHI_RDDM_DONE: + set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state); + break; + default: + cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state); + } +} + int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv, enum cnss_mhi_state mhi_state) { @@ -2234,74 +2335,8 @@ int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv, return ret; } -static int cnss_mhi_link_status(struct mhi_controller *mhi_ctrl, void *priv) -{ - struct cnss_pci_data *pci_priv = priv; - u16 device_id; - - if (!pci_priv) { - cnss_pr_err("pci_priv is NULL\n"); - return -EINVAL; - } - - pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id); - if (device_id != pci_priv->device_id) { - cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n", - device_id, pci_priv->device_id); - return -EIO; - } - - return 0; -} - -static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv, - enum cnss_mhi_state mhi_state) -{ - switch (mhi_state) { - case CNSS_MHI_INIT: - if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_DEINIT: - case CNSS_MHI_POWER_ON: - if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) && - !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_FORCE_POWER_OFF: - if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_POWER_OFF: - case CNSS_MHI_SUSPEND: - if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) && - !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_RESUME: - if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_TRIGGER_RDDM: - if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) && - !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state)) - return 0; - break; - case CNSS_MHI_RDDM_DONE: - return 0; - default: - cnss_pr_err("Unhandled MHI state: %s(%d)\n", - cnss_mhi_state_to_str(mhi_state), mhi_state); - } - - cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n", - cnss_mhi_state_to_str(mhi_state), mhi_state, - pci_priv->mhi_state); - - return -EINVAL; -} -static int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv) +int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv) { int ret; struct cnss_plat_data *plat_priv; @@ -2341,39 +2376,6 @@ void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv) plat_priv->ramdump_info_v2.dump_data_valid = false; } -static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv, - enum cnss_mhi_state mhi_state) -{ - switch (mhi_state) { - case CNSS_MHI_INIT: - set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); - break; - case CNSS_MHI_DEINIT: - 
clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); - break; - case CNSS_MHI_POWER_ON: - set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); - break; - case CNSS_MHI_POWER_OFF: - case CNSS_MHI_FORCE_POWER_OFF: - clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); - clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state); - break; - case CNSS_MHI_SUSPEND: - set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); - break; - case CNSS_MHI_RESUME: - clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); - break; - case CNSS_MHI_TRIGGER_RDDM: - break; - case CNSS_MHI_RDDM_DONE: - set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state); - break; - default: - cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state); - } -} int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 207d8ae1e1160d6239a537981d30e659161fc40d..19052efe53f1292891dc0f38a2f932658b9c5fea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -935,8 +935,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, { struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + bool unified = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); struct wowlan_key_data key_data = { - .configure_keys = !d0i3, + .configure_keys = !d0i3 && !unified, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 1aa74b87599ff3e1101c2b17cebd38c667bd430b..63dcea640d076b0cbf99157381c540dc41d7002b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1303,6 +1303,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + /* + * If we are freeing multiple frames, mark all the frames + * but the first one as acked, since they were acknowledged + * before + * */ + if (skb_freed > 1) + info->flags |= IEEE80211_TX_STAT_ACK; + iwl_mvm_tx_status_check_trigger(mvm, status); info->status.rates[0].count = tx_resp->failure_frame + 1; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index a605d569f663a295eb149d450d94bb2ca87f56c4..9d147b11ee51696668c2d7dc118628c5d5ca3bc5 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8388, "libertas/usb8388_v5.bin", NULL }, { MODEL_8388, "libertas/usb8388.bin", NULL }, { MODEL_8388, "usb8388.bin", NULL }, - { MODEL_8682, "libertas/usb8682.bin", NULL } + { MODEL_8682, "libertas/usb8682.bin", NULL }, + { 0, NULL, NULL } }; static struct usb_device_id if_usb_table[] = { diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 46d0099fd6e82a69dd88027e5c7fc81d8fd83920..94901b0041ceceee495e2a56df3426d141b150db 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -364,11 +364,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, struct mwifiex_power_cfg power_cfg; int dbm = MBM_TO_DBM(mbm); - if (type == NL80211_TX_POWER_FIXED) { + switch (type) { + case NL80211_TX_POWER_FIXED: power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 1; power_cfg.power_level = dbm; - } else { + break; + case NL80211_TX_POWER_LIMITED: + 
power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 0; + power_cfg.power_level = dbm; + break; + case NL80211_TX_POWER_AUTOMATIC: power_cfg.is_power_auto = 1; + break; } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 0f977dc556caae7c2f25bf01d4491fe471c62ab5..c67e08fa1aaff4f939ec89c770ea792edeace6b9 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -240,6 +240,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, } vs_ie = (struct ieee_types_header *)vendor_ie; + if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 > + IEEE_MAX_IE_SIZE) + return -EINVAL; memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), vs_ie, vs_ie->len + 2); le16_add_cpu(&ie->ie_length, vs_ie->len + 2); diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 536ab834b12625f8ac7ccce1b309b465f82db31b..729a69f88a481b4dc61c9e7ccc8ae6471797a55b 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -265,6 +265,7 @@ struct mwifiex_ds_encrypt_key { struct mwifiex_power_cfg { u32 is_power_auto; + u32 is_power_fixed; u32 power_level; }; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 7f9645703d9688f5c7b0d18c142af8e8c89cebb1..478885afb6c6bd6f1553ad07207356e6b778700d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -728,6 +728,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET); if (!power_cfg->is_power_auto) { + u16 dbm_min = power_cfg->is_power_fixed ? 
+ dbm : priv->min_tx_power_level; + txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); @@ -742,7 +745,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x03; pg->modulation_class = MOD_CLASS_HR_DSSS; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class OFDM */ @@ -750,7 +753,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x07; pg->modulation_class = MOD_CLASS_OFDM; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class HTBW20 */ @@ -758,7 +761,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_20; pg++; @@ -767,7 +770,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_40; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index a7e9f544f219d717a13ed29de537383c312c2573..f2ef1464e20cc663a00578c20d965161d67bb8b4 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -287,6 +287,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES) + return; memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); rate_len = rate_ie->len; } @@ -294,8 +296,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, params->beacon.tail, params->beacon.tail_len); - if (rate_ie) + if (rate_ie) { + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len) + return; memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); + } return; } @@ -413,6 +418,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, params->beacon.tail_len); if (vendor_ie) { wmm_ie = (struct ieee_types_header *)vendor_ie; + if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) + return; memcpy(&bss_cfg->wmm_info, wmm_ie + 1, sizeof(bss_cfg->wmm_info)); priv->wmm_enabled = 1; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c index c2d5b495c179a1021dd4cd4221c0032f3a99e34a..c089540116fa72e6952c5a2670aaf32cb99a53e8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c @@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev, led->dev = dev; led->ledpin = ledpin; led->is_radio = is_radio; - strncpy(led->name, name, sizeof(led->name)); + strlcpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = default_trigger; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 4e725d165aa60dade484c5860178c57fcd547d98..e78545d4add3ce2bf779406fe1f1fdf29afef803 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5660,6 +5660,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index d0ffc4d508cff2df70606c9a88845a22884c7bc9..e8963dd2977c40b9335b65b34f998cfb3ed70184 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -770,6 +770,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == @@ -864,6 +867,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 8de29cc3ced0763f4b7ff2b18f7dbd5d0ff5b4e2..a24644f34e650fc679ada018f3912bbd0a3de60f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -234,7 +234,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw) rtl_read_byte(rtlpriv, FW_MAC1_READY)); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n", + "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", rtl_read_dword(rtlpriv, REG_MCUFWDL)); return -1; } diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index fd4e9ba176c9b1ab3f16878f95d6041f12e39903..332a3a5c1c900d1e4df54734b5be25c5d73ee972 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -66,7 +66,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, out: mutex_unlock(&wl->mutex); - return 0; + return ret; } static int diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index cae69148610574d6087c1c762bcf4a4e98da420f..46008f284550205357bf2b2abddf717cff85ec51 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -171,7 +171,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, return vif->hash.mapping[skb_get_hash_raw(skb) % size]; } -static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; @@ -706,7 +707,6 @@ int xenvif_connect_data(struct xenvif_queue *queue, xenvif_unmap_frontend_data_rings(queue); netif_napi_del(&queue->napi); err: - module_put(THIS_MODULE); return err; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 14ceeaaa7fe5f967e8593d2e702f6195c4772466..6d391a268469f447efa335fd316fd20b0665cc8a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -888,9 +888,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb, return 0; } -static RING_IDX xennet_fill_frags(struct netfront_queue *queue, - struct sk_buff *skb, - struct sk_buff_head *list) +static int xennet_fill_frags(struct netfront_queue *queue, + struct sk_buff *skb, + struct sk_buff_head 
*list) { RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; @@ -907,9 +907,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { - queue->rx.rsp_cons = ++cons; + queue->rx.rsp_cons = ++cons + skb_queue_len(list); kfree_skb(nskb); - return ~0U; + return -ENOENT; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, @@ -920,7 +920,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, kfree_skb(nskb); } - return cons; + queue->rx.rsp_cons = cons; + + return 0; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) @@ -1046,8 +1048,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) skb->data_len = rx->status; skb->len += rx->status; - i = xennet_fill_frags(queue, skb, &tmpq); - if (unlikely(i == ~0U)) + if (unlikely(xennet_fill_frags(queue, skb, &tmpq))) goto err; if (rx->flags & XEN_NETRXF_csum_blank) @@ -1057,7 +1058,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) __skb_queue_tail(&rxq, skb); - queue->rx.rsp_cons = ++i; + i = ++queue->rx.rsp_cons; work_done++; } diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index fbd26ecbf4a49212b8d73d35224b7d0299eed64f..4b8a0e61f6853521e0145e46e0c899d4db0efb45 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -278,7 +278,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, *fw_vsc_cfg, len); if (r) { - devm_kfree(dev, fw_vsc_cfg); + devm_kfree(dev, *fw_vsc_cfg); goto vsc_read_err; } } else { diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 073e4a478c89a4db1b47eec392a23823fcf82230..3cd995de1bbb58f94245204d83920faaa3161821 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -791,7 +791,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out, rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); if (rc) - usb_unlink_urb(dev->out_urb); + usb_kill_urb(dev->out_urb); exit: mutex_unlock(&dev->out_urb_lock); diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index dacb9166081b9e152a70002404c5f6eb544ad746..2f08e16ba5661fc996f590a550979602cc9102a8 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -719,6 +719,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev, NFC_PROTO_FELICA_MASK; } else { kfree_skb(nfcid_skb); + nfcid_skb = NULL; /* P2P in type A */ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_NFCID1, diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 7310a261c858bab090bab14ecf3646626aac7651..e175cbeba266f3803fbc0bbf845b267a1135935c 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -330,7 +330,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, return 0; } -static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) { u64 shift, mask; diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index cdb7752dcbb790e3f7991e248187a64c37bbe8d9..446f61ba018d72d46c5647c489e37c21bcdbd6ea 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -47,9 +47,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, } host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read = 
part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[READ]), 1000); host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); @@ -75,11 +77,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, rcu_read_lock(); list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read += - part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written += - part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); } rcu_read_unlock(); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 814373de8c9d373f7184c9711f5ecad5082ebfdc..0441be73c1d86e2a8879dd3e5ec05c4eb76b4589 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -371,7 +371,7 @@ static struct nvmem_device *nvmem_find(const char *name) d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match); if (!d) - return NULL; + return ERR_PTR(-ENOENT); return to_nvmem_device(d); } diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c old mode 100644 new mode 100755 index cb1fbafc52cefe6eab650f40a3d1b600edcbec54..5628820b9595aefe52d47a31ee56a793de9208d2 --- a/drivers/nvmem/nvmem-sysfs.c +++ b/drivers/nvmem/nvmem-sysfs.c @@ -198,10 +198,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, if (!config->base_dev) return -EINVAL; - if (nvmem->read_only) - nvmem->eeprom = bin_attr_ro_root_nvmem; - else - nvmem->eeprom = bin_attr_rw_root_nvmem; + if (nvmem->read_only) { + if (config->root_only) + nvmem->eeprom = bin_attr_ro_root_nvmem; + else + nvmem->eeprom = bin_attr_ro_nvmem; + } else { + if (config->root_only) + nvmem->eeprom = bin_attr_rw_root_nvmem; + else + nvmem->eeprom = bin_attr_rw_nvmem; + } nvmem->eeprom.attr.name = "eeprom"; nvmem->eeprom.size = nvmem->size; #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/drivers/of/base.c b/drivers/of/base.c index f8b21d85754dd28ac53352ba3d0ee27e3a5b772f..a769148b52ac65b85feff9f9eeb17c26b1f80f7d 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -2257,7 +2257,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np) /* OF on pmac has nodes instead of properties named "l2-cache" * beneath CPU nodes. 
*/ - if (!strcmp(np->type, "cpu")) + if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu")) for_each_child_of_node(np, child) if (!strcmp(child->type, "cache")) return child; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0a1ebbbd3f163d49f778edcdb332570c8e3adffe..92530525e35564083b745db91369fe1b6cbb71cb 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -933,6 +933,7 @@ static int __init unittest_data_add(void) of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node); if (!unittest_data_node) { pr_warn("%s: No tree to attach; not running tests\n", __func__); + kfree(unittest_data); return -ENODATA; } of_node_set_flag(unittest_data_node, OF_DETACHED); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index ed92c1254cff473113f9b24e25a598a72871891d..d842ae5310f717e395d1710f8960625aa7f3d949 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -160,6 +160,15 @@ struct dino_device (struct dino_device *)__pdata; }) +/* Check if PCI device is behind a Card-mode Dino. */ +static int pci_dev_is_behind_card_dino(struct pci_dev *dev) +{ + struct dino_device *dino_dev; + + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); + return is_card_dino(&dino_dev->hba.dev->id); +} + /* * Dino Configuration Space Accessor Functions */ @@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev) } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); +#ifdef CONFIG_TULIP +static void pci_fixup_tulip(struct pci_dev *dev) +{ + if (!pci_dev_is_behind_card_dino(dev)) + return; + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM)) + return; + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n", + pci_name(dev)); + /* Disable this card by zeroing the PCI resources */ + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); + memset(&dev->resource[1], 0, sizeof(dev->resource[1])); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip); +#endif /* CONFIG_TULIP */ static void __init dino_bios_init(void) diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index eac0a1238e9d0946585ff1c9ce8cc41b8cd71b7b..c690299d5c4a86225316178d90b9e158b6c5b260 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -43,6 +43,7 @@ #define PCIE_RC_K2HK 0xb008 #define PCIE_RC_K2E 0xb009 #define PCIE_RC_K2L 0xb00a +#define PCIE_RC_K2G 0xb00b #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) @@ -57,6 +58,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { 0, }, }; diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index b8a8a92d6d05ba2dd1255af3ce87dea2c0ef7475..c0da01ff390c240d7d1808f5e14d4aae526301a7 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -1332,6 +1332,7 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev, static void __iomem *loopback_lbar_vir; int ret, i; u32 base_sel_size = 0; + u32 wr_ofst = 0; switch (testcase) { case MSM_PCIE_OUTPUT_PCIE_INFO: @@ -1563,22 +1564,24 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev, break; } + wr_ofst = wr_offset; + PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n", 
dev->res[base_sel - 1].name, dev->res[base_sel - 1].base, - wr_offset, wr_mask, wr_value); + wr_ofst, wr_mask, wr_value); base_sel_size = resource_size(dev->res[base_sel - 1].resource); - if (wr_offset > base_sel_size - 4 || - msm_pcie_check_align(dev, wr_offset)) + if (wr_ofst > base_sel_size - 4 || + msm_pcie_check_align(dev, wr_ofst)) PCIE_DBG_FS(dev, "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n", - dev->rc_idx, wr_offset, base_sel_size - 4); + dev->rc_idx, wr_ofst, base_sel_size - 4); else msm_pcie_write_reg_field(dev->res[base_sel - 1].base, - wr_offset, wr_mask, wr_value); + wr_ofst, wr_mask, wr_value); break; case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE: @@ -5545,7 +5548,7 @@ static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev, { bool child_l0s_enable = 0, child_l1_enable = 0, child_l1ss_enable = 0; - if (!pdev->subordinate || !(&pdev->subordinate->devices)) { + if (!pdev->subordinate || list_empty(&pdev->subordinate->devices)) { PCIE_DBG(dev, "PCIe: RC%d: no device connected to root complex\n", dev->rc_idx); diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 8dfccf7332411b379d262c165829efc3195ac21a..90be00c1bab995e2dbcf2a3760bf37866455dd0b 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -603,12 +603,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); -/* Tegra PCIE requires relaxed ordering */ +/* Tegra20 and Tegra30 PCIE requires relaxed ordering */ static void tegra_pcie_relax_enable(struct pci_dev *dev) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } -DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable); static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) { @@ -1898,14 +1901,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) err = of_pci_get_devfn(port); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } index = PCI_SLOT(err); if (index < 1 || index > soc->num_ports) { dev_err(dev, "invalid port number: %d\n", index); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } index--; @@ -1914,12 +1918,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) if (err < 0) { dev_err(dev, "failed to parse # of lanes: %d\n", err); - return err; + goto err_node_put; } if (value > 16) { dev_err(dev, "invalid # of lanes: %u\n", value); - return -EINVAL; + err = -EINVAL; + goto err_node_put; } lanes |= value << (index << 3); @@ -1933,13 +1938,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) lane += value; rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); - if (!rp) - return -ENOMEM; + if (!rp) { + err = -ENOMEM; + goto err_node_put; + } err = of_address_to_resource(port, 0, &rp->regs); if (err < 0) { dev_err(dev, "failed to parse address: %d\n", err); - return err; + goto err_node_put; } INIT_LIST_HEAD(&rp->list); @@ -1966,6 +1973,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) return err; return 0; + +err_node_put: + 
of_node_put(port); + return err; } /* diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a07533702d269f48c55bff686cd4efc634356358..e09653c73ab4b30fe1e8773cb208b19d1947097d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -753,19 +753,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) } } -/** - * pci_power_up - Put the given device into D0 forcibly - * @dev: PCI device to power up - */ -void pci_power_up(struct pci_dev *dev) -{ - if (platform_pci_power_manageable(dev)) - platform_pci_set_power_state(dev, PCI_D0); - - pci_raw_set_power_state(dev, PCI_D0); - pci_update_current_state(dev, PCI_D0); -} - /** * pci_platform_power_transition - Use platform to change device power state * @dev: PCI device to handle. @@ -941,6 +928,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) } EXPORT_SYMBOL(pci_set_power_state); +/** + * pci_power_up - Put the given device into D0 forcibly + * @dev: PCI device to power up + */ +void pci_power_up(struct pci_dev *dev) +{ + __pci_start_power_transition(dev, PCI_D0); + pci_raw_set_power_state(dev, PCI_D0); + pci_update_current_state(dev, PCI_D0); +} + /** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 547ca7b3f09850ce0f0928b4fe0a26cbcaab14d2..ddb530ee2255cbc7c1ec9130eb160fa12710b44d 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -144,6 +144,7 @@ #define PMBR1 0x0D #define GPIO_USB_4PIN_ULPI_2430C (3 << 0) +static irqreturn_t twl4030_usb_irq(int irq, void *_twl); /* * If VBUS is valid or ID is ground, then we know a * cable is present and we need to be runtime-enabled @@ -392,6 +393,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on) WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); } +static int __maybe_unused twl4030_usb_suspend(struct device *dev) +{ + struct twl4030_usb *twl = dev_get_drvdata(dev); + + /* + * we need enabled runtime on resume, + * so turn irq off here, so we do not get it early + * note: wakeup on usb plug works independently of this + */ + dev_dbg(twl->dev, "%s\n", __func__); + disable_irq(twl->irq); + + return 0; +} + +static int __maybe_unused twl4030_usb_resume(struct device *dev) +{ + struct twl4030_usb *twl = dev_get_drvdata(dev); + + dev_dbg(twl->dev, "%s\n", __func__); + enable_irq(twl->irq); + /* check whether cable status changed */ + twl4030_usb_irq(0, twl); + + return 0; +} + static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev) { struct twl4030_usb *twl = dev_get_drvdata(dev); @@ -652,6 +680,7 @@ static const struct phy_ops ops = { static const struct dev_pm_ops twl4030_usb_pm_ops = { SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend, twl4030_usb_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume) }; static int twl4030_usb_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 13a4c27741572e04fb03532cfc0820dd1623ff6b..6adfb379ac7e6e44d4863e349aafb91a482a0ccf 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev, const struct ns2_pin_function *func; const struct ns2_pin_group *grp; - if (grp_select > pinctrl->num_groups || - func_select > pinctrl->num_functions) + if (grp_select >= pinctrl->num_groups || + func_select >= 
pinctrl->num_functions) return -EINVAL; func = &pinctrl->functions[func_select]; diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 88ba9c50cc8ed1335078e7a3f7af066bc8f7fece..b596f45426ea0c7429b04a6c9e2f9a471dc8e457 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -479,7 +479,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, unsigned num_pins, num_configs, reserve; unsigned long *configs; struct property *pins; - bool has_config; u32 pinfunc; int ret, i; @@ -495,9 +494,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, return ret; } - if (num_configs) - has_config = true; - num_pins = pins->length / sizeof(u32); if (!num_pins) { dev_err(pctldev->dev, "no pins found in node %s\n", @@ -511,7 +507,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, * map for each pin. */ reserve = 1; - if (has_config && num_pins >= 1) + if (num_configs) reserve++; reserve *= num_pins; ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, @@ -534,7 +530,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, group, func); - if (has_config) { + if (num_configs) { ret = pinctrl_utils_add_map_configs(pctldev, map, reserved_maps, num_maps, group, configs, num_configs, diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9f09041859099134220b54c7aee691f8810ee09d..9401a0630e80b545846c28fdf55142e43662ac4d 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1545,16 +1545,6 @@ void at91_pinctrl_gpio_resume(void) #define gpio_irq_set_wake NULL #endif /* CONFIG_PM */ -static struct irq_chip gpio_irqchip = { - .name = "GPIO", - .irq_ack = gpio_irq_ack, - .irq_disable = gpio_irq_mask, - .irq_mask = gpio_irq_mask, - .irq_unmask = gpio_irq_unmask, - /* .irq_set_type is set dynamically */ - .irq_set_wake = gpio_irq_set_wake, -}; - static void gpio_irq_handler(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); @@ -1595,12 +1585,22 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, struct gpio_chip *gpiochip_prev = NULL; struct at91_gpio_chip *prev = NULL; struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); + struct irq_chip *gpio_irqchip; int ret, i; + gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL); + if (!gpio_irqchip) + return -ENOMEM; + at91_gpio->pioc_hwirq = irqd_to_hwirq(d); - /* Setup proper .irq_set_type function */ - gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type; + gpio_irqchip->name = "GPIO"; + gpio_irqchip->irq_ack = gpio_irq_ack; + gpio_irqchip->irq_disable = gpio_irq_mask; + gpio_irqchip->irq_mask = gpio_irq_mask; + gpio_irqchip->irq_unmask = gpio_irq_unmask; + gpio_irqchip->irq_set_wake = gpio_irq_set_wake, + gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type; /* Disable irqs of this PIO controller */ writel_relaxed(~0, at91_gpio->regbase + PIO_IDR); @@ -1611,7 +1611,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, * interrupt. 
*/ ret = gpiochip_irqchip_add(&at91_gpio->chip, - &gpio_irqchip, + gpio_irqchip, 0, handle_edge_irq, IRQ_TYPE_NONE); @@ -1629,7 +1629,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, if (!gpiochip_prev) { /* Then register the chain on the parent IRQ */ gpiochip_set_chained_irqchip(&at91_gpio->chip, - &gpio_irqchip, + gpio_irqchip, at91_gpio->pioc_virq, gpio_irq_handler); return 0; diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index e053f1fa551203cdf6e2fdeb2e3d8b78e75bdc0f..ab2a451f31562813d3580fabf9f3b84f99c61ec3 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = { LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA), }; -/** - * enum lpc18xx_pin_config_param - possible pin configuration parameters - * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt - * controller. - */ -enum lpc18xx_pin_config_param { - PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1, -}; +/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */ +#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1) static const struct pinconf_generic_params lpc18xx_params[] = { {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0}, diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c index e0ecffcbe11f6ec58026f59e63e66b69fc5be7c4..f8b54cfc90c7d36be1af61df7ce029df190d7db1 100644 --- a/drivers/pinctrl/pinctrl-zynq.c +++ b/drivers/pinctrl/pinctrl-zynq.c @@ -967,15 +967,12 @@ enum zynq_io_standards { zynq_iostd_max }; -/** - * enum zynq_pin_config_param - possible pin configuration parameters - * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to +/* + * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to * this parameter (on a custom format) tells the driver which alternative * IO standard to use. */ -enum zynq_pin_config_param { - PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1, -}; +#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1) static const struct pinconf_generic_params zynq_dt_params[] = { {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18}, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c old mode 100644 new mode 100755 index ac83345f9bab943b60bfdb944af03047ac9775ec..0165373573673cd92a6a4f71aeb873b6d6dfee27 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1173,11 +1173,24 @@ static int pmic_gpio_probe(struct platform_device *pdev) goto err_free; } - ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins); - if (ret) { - dev_err(dev, "failed to add pin range\n, ret=%d\n", ret); - gpiochip_remove(&state->chip); - goto err_free; + /* + * For DeviceTree-supported systems, the gpio core checks the + * pinctrl's device node for the "gpio-ranges" property. + * If it is present, it takes care of adding the pin ranges + * for the driver. In this case the driver can skip ahead. + * + * In order to remain compatible with older, existing DeviceTree + * files which don't set the "gpio-ranges" property or systems that + * utilize ACPI the driver has to call gpiochip_add_pin_range(). 
+ */ + if (!of_property_read_bool(dev->of_node, "gpio-ranges")) { + ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, + npins); + if (ret) { + dev_err(dev, "failed to add pin range\n, ret=%d\n", ret); + gpiochip_remove(&state->chip); + goto err_free; + } } err_free: diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 277622b4b6fb9bb90b4c045e984ca106eb2f4b2f..1d9f63e954c715710f8c595223c5dc32c6cae10b 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -52,7 +52,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg) static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg) { - writel(val, pmx->regs[bank] + reg); + writel_relaxed(val, pmx->regs[bank] + reg); + /* make sure pinmux register write completed */ + pmx_readl(pmx, bank, reg); } static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 736c5949e90305ae88a2635c41f372c8a74d6f76..2564cd54ddc84d37d07e34515c3923c6758172e8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -2061,8 +2061,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case IPA_IOC_GET_PHERIPHERAL_EP_INFO: IPADBG("Got IPA_IOC_GET_EP_INFO\n"); - if (ipa3_ctx->ipa_config_is_auto == false) - return -ENOTTY; + if (ipa3_ctx->ipa_config_is_auto == false) { + IPADBG("not an auto config: returning error\n"); + retval = -ENOTTY; + break; + } if (copy_from_user(&ep_info, (const void __user *)arg, sizeof(struct ipa_ioc_get_ep_info))) { IPAERR_RL("copy_from_user fails\n"); @@ -5140,10 +5143,6 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf, IPADBG("user input string %s\n", dbg_buff); - /* Prevent consequent calls from trying to load the FW again. */ - if (ipa3_is_ready()) - return count; - /* Check MHI configuration on MDM devices */ if (!ipa3_is_msm_device()) { @@ -5183,6 +5182,10 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf, } } + /* Prevent consequent calls from trying to load the FW again. */ + if (ipa3_is_ready()) + return count; + /* Prevent multiple calls from trying to load the FW again. */ if (ipa3_ctx->fw_loaded) { IPAERR("not load FW again\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 5df2b902bb8a757c17d092825bf59b41bf4c7675..41663929ca1532bd734beb204be63a66782074c8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -311,7 +311,7 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config [IPA_4_0_AUTO] = { /*not-used UL_DL CV2X not-used, other are invalid */ [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { - {1, 63}, {1, 63}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + {1, 63}, {1, 63}, {2, 2}, {0, 0}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { @@ -324,7 +324,7 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config [IPA_4_0_AUTO_MHI] = { /* PCIE DDR DMA/CV2X not used, other are invalid */ [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { - {4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} }, + {3, 3}, {5, 5}, {2, 2}, {0, 0}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { {10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} }, [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c index 22ab30fa4b05af2a4dc8ac325b656b22fc0afd9b..d045d599cc9a81d434dd21270c0371a40a74ad4c 100644 --- a/drivers/platform/msm/mhi_dev/mhi_uci.c +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -996,6 +996,15 @@ static int mhi_state_uevent(struct device *dev, struct kobj_uevent_env *env) mhi_parse_state(buf, &nbytes, info); add_uevent_var(env, "MHI_CHANNEL_STATE_12=%s", buf); + rc = mhi_ctrl_state_info(MHI_CLIENT_DCI_OUT, &info); + if (rc) { + pr_err("Failed to obtain channel 20 state\n"); + return -EINVAL; + } + nbytes = 0; + mhi_parse_state(buf, &nbytes, info); + add_uevent_var(env, "MHI_CHANNEL_STATE_20=%s", buf); + return 0; } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 69ffbd7b76f7421176815daef489c369e9a99077..0fd7e40b86a0d29cbc176ee2e08603baf6b983bc 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -78,10 +78,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str, static struct quirk_entry quirk_asus_unknown = { .wapf = 0, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_q500a = { .i8042_filter = asus_q500a_i8042_filter, + .wmi_backlight_set_devstate = true, }; /* @@ -92,15 +94,18 @@ static struct quirk_entry quirk_asus_q500a = { static struct quirk_entry quirk_asus_x55u = { .wapf = 4, .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, .no_display_toggle = true, }; static struct quirk_entry quirk_asus_wapf4 = { .wapf = 4, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_x200ca = { .wapf = 2, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_no_rfkill = { @@ -114,13 +119,16 @@ static struct quirk_entry quirk_no_rfkill_wapf4 = { static struct quirk_entry quirk_asus_ux303ub = { .wmi_backlight_native = true, + .wmi_backlight_set_devstate = true, }; static struct quirk_entry quirk_asus_x550lb = { + .wmi_backlight_set_devstate = true, .xusb2pr = 0x01D9, }; -static struct quirk_entry quirk_asus_ux330uak = { +static struct quirk_entry quirk_asus_forceals = { + .wmi_backlight_set_devstate = true, .wmi_force_als_set = true, }; @@ -431,7 +439,7 @@ static const struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"), }, - .driver_data = &quirk_asus_ux330uak, + .driver_data = 
&quirk_asus_forceals, }, { .callback = dmi_matched, @@ -442,6 +450,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x550lb, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. UX430UQ", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"), + }, + .driver_data = &quirk_asus_forceals, + }, {}, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 10bd13b301784bff53abff1207d971ea6cafc0c5..aede41a92cacb29834080012b0a6c65e9c37f53f 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2154,7 +2154,7 @@ static int asus_wmi_add(struct platform_device *pdev) err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; - } else + } else if (asus->driver->quirks->wmi_backlight_set_devstate) err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); status = wmi_install_notify_handler(asus->driver->event_guid, diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 5db052d1de1e16a8eb6edb7e0208e08357957478..53bab79780e2212b5883f0532b66b52070d1c703 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -45,6 +45,7 @@ struct quirk_entry { bool store_backlight_power; bool wmi_backlight_power; bool wmi_backlight_native; + bool wmi_backlight_set_devstate; bool wmi_force_als_set; int wapf; /* diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index 90b0b5a70ce52a303017319e31afb4bd390d230f..04ca990e8f6cb6272f78e2eef38a55e63bd097db 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -246,6 +246,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev) if (!pdev->dev.of_node) return -ENODEV; + if (at91_shdwc) + return -EBUSY; + at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL); if (!at91_shdwc) return -ENOMEM; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 2199f673118c001e17ab09b4175d90946687d511..ea8c26a108f00d1ddc331ea7a0ce16da7da4ef36 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2437,17 +2437,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf, size_t count) { unsigned long charge_full; - ssize_t ret; + int ret; ret = kstrtoul(buf, 10, &charge_full); + if (ret) + return ret; - dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full); - - if (!ret) { - di->bat_cap.max_mah = (int) charge_full; - ret = count; - } - return ret; + di->bat_cap.max_mah = (int) charge_full; + return count; } static ssize_t charge_now_show(struct ab8500_fg *di, char *buf) @@ -2459,20 +2456,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf, size_t count) { unsigned long charge_now; - ssize_t ret; + int ret; ret = kstrtoul(buf, 10, &charge_now); + if (ret) + return ret; - dev_dbg(di->dev, "Ret %zd charge_now %lu was %d", - ret, charge_now, di->bat_cap.prev_mah); - - if (!ret) { - di->bat_cap.user_mah = (int) charge_now; - di->flags.user_cap = true; - ret = count; - queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); - } - return ret; + di->bat_cap.user_mah = (int) charge_now; + di->flags.user_cap = true; + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); + return count; } static struct ab8500_fg_sysfs_entry charge_full_attr = diff --git a/drivers/power/supply/max8998_charger.c 
b/drivers/power/supply/max8998_charger.c index b64cf0f1414254336e68c5e07906f420258ab6cd..66438029bdd0c0993cfe6bb4dade2ba2b8fdcf14 100644 --- a/drivers/power/supply/max8998_charger.c +++ b/drivers/power/supply/max8998_charger.c @@ -85,7 +85,7 @@ static const struct power_supply_desc max8998_battery_desc = { static int max8998_battery_probe(struct platform_device *pdev) { struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev); + struct max8998_platform_data *pdata = iodev->pdata; struct power_supply_config psy_cfg = {}; struct max8998_battery_data *max8998; struct i2c_client *i2c; diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index a6efed11320606b9fd661f1642c993427ad4f0df..9fdf7a27f722c909230492458b252e507783af57 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -98,7 +98,8 @@ static ssize_t power_supply_show_property(struct device *dev, dev_dbg(dev, "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV && ret != -EAGAIN) - dev_err(dev, "driver failed to report `%s' property: %zd\n", + dev_err_ratelimited(dev, + "driver failed to report `%s' property: %zd\n", attr->attr.name, ret); return ret; } diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index bcd4dc304f270c50739e2b2ddfc2850e1efe3bf0..5b1f147b11cb0d77116810d4458a9967d4b8a10c 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -449,7 +449,8 @@ static void twl4030_current_worker(struct work_struct *data) if (v < USB_MIN_VOLT) { /* Back up and stop adjusting. */ - bci->usb_cur -= USB_CUR_STEP; + if (bci->usb_cur >= USB_CUR_STEP) + bci->usb_cur -= USB_CUR_STEP; bci->usb_cur_target = bci->usb_cur; } else if (bci->usb_cur >= bci->usb_cur_target || bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) { @@ -468,6 +469,7 @@ static void twl4030_current_worker(struct work_struct *data) static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable) { int ret; + u32 reg; if (bci->usb_mode == CHARGE_OFF) enable = false; @@ -481,14 +483,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable) bci->usb_enabled = 1; } - if (bci->usb_mode == CHARGE_AUTO) + if (bci->usb_mode == CHARGE_AUTO) { + /* Enable interrupts now. */ + reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | + TWL4030_TBATOR2 | TWL4030_TBATOR1 | + TWL4030_BATSTS); + ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, + TWL4030_INTERRUPTS_BCIIMR1A); + if (ret < 0) { + dev_err(bci->dev, + "failed to unmask interrupts: %d\n", + ret); + return ret; + } /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB); + } /* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */ ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0, TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4); if (bci->usb_mode == CHARGE_LINEAR) { + /* Enable interrupts now. 
*/ + reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 | + TWL4030_TBATOR1 | TWL4030_BATSTS); + ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, + TWL4030_INTERRUPTS_BCIIMR1A); + if (ret < 0) { + dev_err(bci->dev, + "failed to unmask interrupts: %d\n", + ret); + return ret; + } twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0); /* Watch dog key: WOVF acknowledge */ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33, diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 0d8130450dd362d7045160dd32e9fd56a00194ee..c56a99f10228ca0097f0af8ec322ac49992b0e5c 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4859,7 +4859,7 @@ static int __init regulator_init(void) /* init early to allow our consumers to complete system booting */ core_initcall(regulator_init); -static int __init regulator_late_cleanup(struct device *dev, void *data) +static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); const struct regulator_ops *ops = rdev->desc->ops; @@ -4908,17 +4908,8 @@ static int __init regulator_late_cleanup(struct device *dev, void *data) return 0; } -static int __init regulator_init_complete(void) +static void regulator_init_complete_work_function(struct work_struct *work) { - /* - * Since DT doesn't provide an idiomatic mechanism for - * enabling full constraints and since it's much more natural - * with DT to provide them just assume that a DT enabled - * system has full constraints. - */ - if (of_have_populated_dt()) - has_full_constraints = true; - /* * Regulators may had failed to resolve their input supplies * when were registered, either because the input supply was @@ -4936,6 +4927,35 @@ static int __init regulator_init_complete(void) */ class_for_each_device(®ulator_class, NULL, NULL, regulator_late_cleanup); +} + +static DECLARE_DELAYED_WORK(regulator_init_complete_work, + regulator_init_complete_work_function); + +static int __init regulator_init_complete(void) +{ + /* + * Since DT doesn't provide an idiomatic mechanism for + * enabling full constraints and since it's much more natural + * with DT to provide them just assume that a DT enabled + * system has full constraints. + */ + if (of_have_populated_dt()) + has_full_constraints = true; + + /* + * We punt completion for an arbitrary amount of time since + * systems like distros will load many drivers from userspace + * so consumers might not always be ready yet, this is + * particularly an issue with laptops where this might bounce + * the display off then on. Ideally we'd get a notification + * from userspace when this happens but we don't so just wait + * a bit and hope we waited long enough. It'd be better if + * we'd only do this on systems that need it, and a kernel + * command line option might be useful. 
+ */ + schedule_delayed_work(®ulator_init_complete_work, + msecs_to_jiffies(30000)); return 0; } diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c index f53e63301a2054d7a822f5017a8e9a2d7d5a35b4..e71117d7217bc9e720ffbe810d8ee044fd58fdba 100644 --- a/drivers/regulator/lm363x-regulator.c +++ b/drivers/regulator/lm363x-regulator.c @@ -33,7 +33,7 @@ /* LM3632 */ #define LM3632_BOOST_VSEL_MAX 0x26 -#define LM3632_LDO_VSEL_MAX 0x29 +#define LM3632_LDO_VSEL_MAX 0x28 #define LM3632_VBOOST_MIN 4500000 #define LM3632_VLDO_MIN 4000000 diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 86b348740fcdcf96f79d8fef8f954af07ea5bddf..ffb1f61d2c752e2598f71de3be87e76dbe15e926 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -608,7 +608,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client, /* SW2~SW4 high bit check and modify the voltage value table */ if (i >= sw_check_start && i <= sw_check_end) { - regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); + ret = regmap_read(pfuze_chip->regmap, + desc->vsel_reg, &val); + if (ret) { + dev_err(&client->dev, "Fails to read from the register.\n"); + return ret; + } + if (val & sw_hi) { if (pfuze_chip->chip_id == PFUZE3000) { desc->volt_table = pfuze3000_sw2hi; diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index d2f9942987535586dc69da863fbaa6b4ab16bdf4..6d17357b3a24891780fa6606b280a21c9c9ffef7 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb) while (timeout++ <= abb->settling_time) { status = ti_abb_check_txdone(abb); if (status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** @@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb) status = ti_abb_check_txdone(abb); if (!status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 188205a5526132b8b774532316b124972f013b02..d0ebca301afc93e7724e4ea61f35cea841a641aa 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -324,28 +324,29 @@ struct reset_control *__of_reset_control_get(struct device_node *node, break; } } - of_node_put(args.np); if (!rcdev) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(-EPROBE_DEFER); + rstc = ERR_PTR(-EPROBE_DEFER); + goto out; } if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(-EINVAL); + rstc = ERR_PTR(-EINVAL); + goto out; } rstc_id = rcdev->of_xlate(rcdev, &args); if (rstc_id < 0) { - mutex_unlock(&reset_list_mutex); - return ERR_PTR(rstc_id); + rstc = ERR_PTR(rstc_id); + goto out; } /* reset_list_mutex also protects the rcdev's reset_control list 
*/ rstc = __reset_control_get_internal(rcdev, rstc_id, shared); +out: mutex_unlock(&reset_list_mutex); + of_node_put(args.np); return rstc; } diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 3c8c6f942e67ff809c6e8fc79912bd6af0dd0bc0..a06792966ea90fa580ab2a8e71d08913fcae88e3 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -94,8 +94,9 @@ static int pcf8523_voltage_low(struct i2c_client *client) return !!(value & REG_CONTROL3_BLF); } -static int pcf8523_select_capacitance(struct i2c_client *client, bool high) +static int pcf8523_load_capacitance(struct i2c_client *client) { + u32 load; u8 value; int err; @@ -103,14 +104,24 @@ static int pcf8523_select_capacitance(struct i2c_client *client, bool high) if (err < 0) return err; - if (!high) - value &= ~REG_CONTROL1_CAP_SEL; - else + load = 12500; + of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads", + &load); + + switch (load) { + default: + dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", + load); + /* fall through */ + case 12500: value |= REG_CONTROL1_CAP_SEL; + break; + case 7000: + value &= ~REG_CONTROL1_CAP_SEL; + break; + } err = pcf8523_write(client, REG_CONTROL1, value); - if (err < 0) - return err; return err; } @@ -307,9 +318,10 @@ static int pcf8523_probe(struct i2c_client *client, if (!pcf) return -ENOMEM; - err = pcf8523_select_capacitance(client, true); + err = pcf8523_load_capacitance(client); if (err < 0) - return err; + dev_warn(&client->dev, "failed to set xtal load capacitance: %d", + err); err = pcf8523_set_pm(client, 0); if (err < 0) diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 5dab4665ca3bd2c488fd4548f15fce3945bb6fad..3e0eea3aa876db6d1286f5b7f9d4911b57481dae 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -106,7 +106,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) */ static int s35390a_reset(struct s35390a *s35390a, char *status1) { - char buf; + u8 buf; int ret; unsigned initcount = 0; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index e443b0d0b23612efec3800a5468e36e58ef7f8d3..0d59c128f734309f505ef0d3543f29a46c20087f 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -369,7 +369,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, goto error; } /* Check for trailing stuff. 
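The rtc-s35390a change above looks cosmetic but probably is not: whether plain char is signed is implementation-defined, so a status byte with the top bit set can turn negative and comparisons against it misfire. A small stand-alone illustration (ordinary userspace C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        char    c = (char)0x90;  /* plain char: commonly signed, i.e. -112 */
        uint8_t u = 0x90;        /* u8 equivalent: always 144 */

        printf("char=%d u8=%u\n", c, u);
        printf("char > 0x40: %d   u8 > 0x40: %d\n", c > 0x40, u > 0x40);
        return 0;
}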
*/ - if (i == num_devices && strlen(buf) > 0) { + if (i == num_devices && buf && strlen(buf) > 0) { rc = -EINVAL; goto error; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 3d2b20ee613f8e38843bde3a2fed32a5ba05e347..39a2b0cde9e424fd6efb389b42229c0c2d6243a4 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1120,6 +1120,8 @@ device_initcall(cio_settle_init); int sch_is_pseudo_sch(struct subchannel *sch) { + if (!sch->dev.parent) + return 0; return sch == to_css(sch->dev.parent)->pseudo_subchannel; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 6ba4e921d2fd31ba2fa8cdbe8d01850f66043389..51152681aba6e6fe2a7f032fb25120bee2854291 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -991,7 +991,10 @@ static int __qeth_l2_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6e6ba1baf9c489759be6ba3adaf66e1cd16541f0..b40a61d9ad9ecb97df01cf225fac9678ccef520e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3005,7 +3005,10 @@ static int __qeth_l3_open(struct net_device *dev) if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { napi_enable(&card->napi); + local_bh_disable(); napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } else rc = -EIO; return rc; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 1964391db9047f9d6b6fd0ff1c80a570f8bee6c6..a3aaef4c53a3ce95218cd9091baf90e20a57669c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -20,6 +20,11 @@ struct kmem_cache *zfcp_fsf_qtcb_cache; +static bool ber_stop = true; +module_param(ber_stop, bool, 0600); +MODULE_PARM_DESC(ber_stop, + "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); + static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; @@ -231,10 +236,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) case FSF_STATUS_READ_SENSE_DATA_AVAIL: break; case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - dev_warn(&adapter->ccw_device->dev, - "The error threshold for checksum statistics " - "has been exceeded\n"); zfcp_dbf_hba_bit_err("fssrh_3", req); + if (ber_stop) { + dev_warn(&adapter->ccw_device->dev, + "All paths over this FCP device are disused because of excessive bit errors\n"); + zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); + } else { + dev_warn(&adapter->ccw_device->dev, + "The error threshold for checksum statistics has been exceeded\n"); + } break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 17b1574920fd6555e178ee0768ff90b8a7377826..941e3f25b4a9f50fe60cb3a79fc88647a912ec9b 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -986,7 +986,7 @@ config SCSI_SNI_53C710 config 53C700_LE_ON_BE bool - depends on SCSI_LASI700 + depends on SCSI_LASI700 || SCSI_SNI_53C710 default y config SCSI_STEX diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 3cfab8868c9897691b4444e31f956680ae4e80f7..27270631c70c23c3dab0afae7f2a10703b1c4ad6 100644 --- a/drivers/scsi/NCR5380.c +++ 
b/drivers/scsi/NCR5380.c @@ -617,16 +617,15 @@ static void complete_cmd(struct Scsi_Host *instance, if (hostdata->sensing == cmd) { /* Autosense processing ends here */ - if ((cmd->result & 0xff) != SAM_STAT_GOOD) { + if (status_byte(cmd->result) != GOOD) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); - set_host_byte(cmd, DID_ERROR); - } else + } else { scsi_eh_restore_cmnd(cmd, &hostdata->ses); + set_driver_byte(cmd, DRIVER_SENSE); + } hostdata->sensing = NULL; } - hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); - cmd->scsi_done(cmd); } @@ -1798,6 +1797,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); return; #endif case PHASE_DATAIN: @@ -1880,6 +1880,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd, scmd_id(cmd), cmd->device->lun); hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); cmd->result &= ~0xffff; cmd->result |= cmd->SCp.Status; @@ -2039,6 +2040,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { hostdata->connected = NULL; + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); maybe_release_dma_irq(instance); @@ -2101,8 +2103,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance) NCR5380_write(MODE_REG, MR_BASE); target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - - dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); + if (!target_mask || target_mask & (target_mask - 1)) { + shost_printk(KERN_WARNING, instance, + "reselect: bad target_mask 0x%02x\n", target_mask); + return; + } /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2116,6 +2121,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); if (NCR5380_poll_politely(instance, STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { + shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n"); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return; } @@ -2127,6 +2133,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance) if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { + if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0) + /* BUS FREE phase */ + return; + shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n"); do_abort(instance); return; } @@ -2188,13 +2198,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance) dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, "reselect: removed %p from disconnected queue\n", tmp); } else { + int target = ffs(target_mask) - 1; + shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", target_mask, lun); /* * Since we have an established nexus that we can't do anything * with, we must abort it. 
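The new sanity check in NCR5380_reselect() above is the standard single-bit-set test: "!mask || (mask & (mask - 1))" rejects an empty mask and any mask naming more than one target. A tiny stand-alone demonstration:

#include <stdio.h>

static int bad_mask(unsigned int mask)
{
        /* true for 0 and for anything with two or more bits set */
        return !mask || (mask & (mask - 1));
}

int main(void)
{
        unsigned int samples[] = { 0x00, 0x01, 0x08, 0x0a, 0x81 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("0x%02x -> %s\n", samples[i],
                       bad_mask(samples[i]) ? "reject" : "single target");
        return 0;
}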
*/ - do_abort(instance); + if (do_abort(instance) == 0) + hostdata->busy[target] &= ~(1 << lun); return; } @@ -2356,15 +2369,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (list_del_cmd(&hostdata->autosense, cmd)) { dsprintk(NDEBUG_ABORT, instance, "abort: removed %p from sense queue\n", cmd); - set_host_byte(cmd, DID_ERROR); complete_cmd(instance, cmd); } out: if (result == FAILED) dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); - else + else { + hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); + } queue_work(hostdata->work_q, &hostdata->main_task); maybe_release_dma_irq(instance); @@ -2392,7 +2406,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) spin_lock_irqsave(&hostdata->lock, flags); #if (NDEBUG & NDEBUG_ANY) - scmd_printk(KERN_INFO, cmd, __func__); + shost_printk(KERN_INFO, instance, __func__); #endif NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_dprint_phase(NDEBUG_ANY, instance); @@ -2410,10 +2424,13 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) * commands! */ - if (list_del_cmd(&hostdata->unissued, cmd)) { + list_for_each_entry(ncmd, &hostdata->unissued, list) { + struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); + cmd->result = DID_RESET << 16; cmd->scsi_done(cmd); } + INIT_LIST_HEAD(&hostdata->unissued); if (hostdata->selecting) { hostdata->selecting->result = DID_RESET << 16; @@ -2432,7 +2449,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) list_for_each_entry(ncmd, &hostdata->autosense, list) { struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - set_host_byte(cmd, DID_RESET); cmd->scsi_done(cmd); } INIT_LIST_HEAD(&hostdata->autosense); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 5ee7f44cf869b906397447a0572b1b227e8951a1..830b2d2dcf20606f31b94f91fc9e2f55a0c6afae 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -1972,6 +1972,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) xferred -= psge->length; } else { /* Partial SG entry done */ + pci_dma_sync_single_for_cpu(srb->dcb-> + acb->dev, + srb->sg_bus_addr, + SEGMENTX_LEN, + PCI_DMA_TODEVICE); psge->length -= xferred; psge->address += xferred; srb->sg_index = idx; @@ -3450,14 +3455,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, } } - if (dir != PCI_DMA_NONE && scsi_sg_count(cmd)) - pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd), - scsi_sg_count(cmd), dir); - ckc_only = 0; /* Check Error Conditions */ ckc_e: + pci_unmap_srb(acb, srb); + if (cmd->cmnd[0] == INQUIRY) { unsigned char *base = NULL; struct ScsiInqData *ptr; @@ -3511,7 +3514,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, cmd, cmd->result); srb_free_insert(acb, srb); } - pci_unmap_srb(acb, srb); cmd->scsi_done(cmd); waiting_process_next(acb); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 98787588247bfb6ab088e3f1e657fe476d341493..60c288526355a53cfb7c85b6f3fb7b61cb8ed529 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -527,6 +527,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; unsigned long flags; + bool transitioning_sense = false; if (!pg->expiry) { unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; @@ -571,13 +572,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) 
goto retry; } /* - * Retry on ALUA state transition or if any - * UNIT ATTENTION occurred. + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. */ if (sense_hdr.sense_key == NOT_READY && - sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) - err = SCSI_DH_RETRY; - else if (sense_hdr.sense_key == UNIT_ATTENTION) + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry on any other UNIT ATTENTION occurred. + */ + if (sense_hdr.sense_key == UNIT_ATTENTION) err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY && pg->expiry != 0 && time_before(jiffies, pg->expiry)) { @@ -665,7 +672,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) off = 8 + (desc[7] * 4); } + skip_rtpg: spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 02cb76fd442084cbc7ba108a00002f513c119f86..6bbf2945a3e00aff9e875ca11180673a650cd14d 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -3500,6 +3500,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) case START_STOP: scb->scsi_cmd->result = DID_OK << 16; + break; case TEST_UNIT_READY: case INQUIRY: diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 609dafd661d1437a0287c7511d8e31f7c1e1c199..da4583a2fa23e2f9543b56fb75908fdea9421f56 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) * the task management request. * @task_request: the handle to the task request object to start. 
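The one-line ips.c fix above adds the break that keeps a completed START_STOP from falling into the TEST_UNIT_READY handling; the failure mode is easy to reproduce in isolation:

#include <stdio.h>

enum op { OP_START_STOP, OP_TEST_UNIT_READY };

static const char *handle(enum op op)
{
        const char *result = "unhandled";

        switch (op) {
        case OP_START_STOP:
                result = "start/stop completed";
                break;  /* without this, execution continues into the next case */
        case OP_TEST_UNIT_READY:
                result = "unit-ready checked";
                break;
        }
        return result;
}

int main(void)
{
        printf("START_STOP      -> %s\n", handle(OP_START_STOP));
        printf("TEST_UNIT_READY -> %s\n", handle(OP_TEST_UNIT_READY));
        return 0;
}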
*/ -enum sci_task_status sci_controller_start_task(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *ireq) +enum sci_status sci_controller_start_task(struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) { enum sci_status status; @@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost, "%s: SCIC Controller starting task from invalid " "state\n", __func__); - return SCI_TASK_FAILURE_INVALID_STATE; + return SCI_FAILURE_INVALID_STATE; } status = sci_remote_device_start_task(ihost, idev, ireq); diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 22a9bb1abae1473062b06031494bb38b2cc11576..15dc6e0d8deb0c2c501f76e02f14cde68d87ddda 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -490,7 +490,7 @@ enum sci_status sci_controller_start_io( struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_task_status sci_controller_start_task( +enum sci_status sci_controller_start_task( struct isci_host *ihost, struct isci_remote_device *idev, struct isci_request *ireq); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index b709d2b208809cce8442740a61c5429f5ffee982..7d71ca421751d3ba77a99f40fda1413afbf07f5e 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, if (status == SCI_SUCCESS) { if (ireq->stp.rsp.status & ATA_ERR) - status = SCI_IO_FAILURE_RESPONSE_VALID; + status = SCI_FAILURE_IO_RESPONSE_VALID; } else { - status = SCI_IO_FAILURE_RESPONSE_VALID; + status = SCI_FAILURE_IO_RESPONSE_VALID; } if (status != SCI_SUCCESS) { diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 6dcaed0c1fc8cf32c5e6956ff858b95818caf6d1..fb6eba331ac6eb9f51496682da04f50a33bdd063 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, struct isci_tmf *tmf, unsigned long timeout_ms) { DECLARE_COMPLETION_ONSTACK(completion); - enum sci_task_status status = SCI_TASK_FAILURE; + enum sci_status status = SCI_FAILURE; struct isci_request *ireq; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; @@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, /* start the TMF io. 
*/ status = sci_controller_start_task(ihost, idev, ireq); - if (status != SCI_TASK_SUCCESS) { + if (status != SCI_SUCCESS) { dev_dbg(&ihost->pdev->dev, "%s: start_io failed - status = 0x%x, request = %p\n", __func__, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index ace4f1f41b8e0bf196e7330cfd3f51d3eadbfc2a..d60564397be548e584e6777cb01ebf17c7f8bcac 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -798,7 +798,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, return rc; return iscsi_conn_get_addr_param((struct sockaddr_storage *) - &addr, param, buf); + &addr, + (enum iscsi_param)param, buf); default: return iscsi_host_get_param(shost, param, buf); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 400eee9d778329664a9b9b7e2378191340d331bb..d44f18f773c0fadd14e8bc6a11b2add043997836 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -2049,14 +2049,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) return res; } - /* delete the old link */ - if (SAS_ADDR(phy->attached_sas_addr) && - SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) { - SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", - SAS_ADDR(dev->sas_addr), phy_id, - SAS_ADDR(phy->attached_sas_addr)); - sas_unregister_devs_sas_addr(dev, phy_id, last); - } + /* we always have to delete the old device when we went here */ + SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", + SAS_ADDR(dev->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr)); + sas_unregister_devs_sas_addr(dev, phy_id, last); return sas_discover_new(dev, phy_id); } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b5be4df05733fcc9d549f9af707fbb7a23564473..3702497b5b169804fbc7e5e065edc048c11ae89e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1141,6 +1141,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } if (!rc) { @@ -1155,6 +1156,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->fcf.fcf_flag &= ~FCF_DISCOVERY; phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 9cca5ddbc50ccb65d25f693827b671d472b81088..6eaba16768461ebe5489e403f8fc193837f1c986 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1969,6 +1969,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) "failover and change port state:x%x/x%x\n", phba->pport->port_state, LPFC_VPORT_UNKNOWN); phba->pport->port_state = LPFC_VPORT_UNKNOWN; + + if (!phba->fcf.fcf_redisc_attempted) { + lpfc_unregister_fcf(phba); + + rc = lpfc_sli4_redisc_fcf_table(phba); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "3195 Rediscover FCF table\n"); + phba->fcf.fcf_redisc_attempted = 1; + lpfc_sli4_clear_fcf_rr_bmask(phba); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3196 Rediscover FCF table " + "failed. Status:x%x\n", rc); + } + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "3197 Already rediscover FCF table " + "attempted. 
No more retry\n"); + } goto stop_flogi_current_fcf; } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e9ea8f4ea2c9200719093d9945a00552e4a23508..2f80b2c0409e0dd6139258ae593dade68f014ab2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4444,7 +4444,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, break; } /* If fast FCF failover rescan event is pending, do nothing */ - if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { + if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); break; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 56a3df4fddb05e3e8c15fb170d6938e382920717..21ec7b5b6c85c79181e4b05cee468d1a760000b9 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -759,9 +759,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && - (ndlp->nlp_type & NLP_FCP_TARGET))) { + (ndlp->nlp_type & NLP_FCP_TARGET)))) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c05fc61a383b265c11cbf8d8d78ebfce176d8f1b..e1e0feb2500312d63ccce377014e278d5582ace7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -16553,15 +16553,8 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) goto initial_priority; lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2844 No roundrobin failover FCF available\n"); - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) - return LPFC_FCOE_FCF_NEXT_NONE; - else { - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, - "3063 Only FCF available idx %d, flag %x\n", - next_fcf_index, - phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); - return next_fcf_index; - } + + return LPFC_FCOE_FCF_NEXT_NONE; } if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 0b88b5703e0f10b835e79981594cc6a606e401f4..9c69c4215de30c26329d99852fbf35e1d77fb4db 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -237,6 +237,7 @@ struct lpfc_fcf { #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) + uint16_t fcf_redisc_attempted; uint32_t addr_mode; uint32_t eligible_fcf_cnt; struct lpfc_fcf_rec current_rec; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 19bffe0b2cc0a3d63328efd1e96b4bfe3c423b10..2cbfec6a7466261e9e65ee01f2837c0c5295a051 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4219,11 +4219,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && pdev->subsystem_device == 0xC000) - return -ENODEV; + goto out_disable_device; /* Now check the magic signature byte */ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) - return -ENODEV; + goto out_disable_device; /* Ok it is probably a megaraid */ } diff --git 
a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d90693b2767fdba8ae30993f4c6bf0bf3953f98e..c5cc002dfdd5caaffaeafd83ddfa0c43c4bf57be 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3694,12 +3694,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) /* * The cur_state should not last for more than max_wait secs */ - for (i = 0; i < (max_wait * 1000); i++) { + for (i = 0; i < max_wait; i++) { curr_abs_state = instance->instancet-> read_fw_status_reg(instance->reg_set); if (abs_state == curr_abs_state) { - msleep(1); + msleep(1000); } else break; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index cebfd734fd769c78a36fb66c1805f5fb89e37ac2..a9fef0cd382bd734acd10508423a5e3d17aa54eb 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -674,10 +674,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, r = _config_request(ioc, &mpi_request, mpi_reply, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sizeof(*config_page)); - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; - r = _config_request(ioc, &mpi_request, mpi_reply, - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, - sizeof(*config_page)); out: return r; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index caa00451d451ceb53978799fcf26d414f01c3d36..f0a3bb4961e5b196b0b17aed4b256a1394f61e94 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3297,6 +3297,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, return _scsih_check_for_pending_tm(ioc, smid); } +/** _scsih_allow_scmd_to_device - check whether scmd needs to + * issue to IOC or not. + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * + * Returns true if scmd can be issued to IOC otherwise returns false. 
+ */ +inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + + if (ioc->pci_error_recovery) + return false; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (ioc->remove_host) + return false; + + return true; + } + + if (ioc->remove_host) { + + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } + } + + return true; +} /** * _scsih_sas_control_complete - completion routine @@ -4059,7 +4093,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - if (ioc->pci_error_recovery || ioc->remove_host) { + if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 10546faac58c62be292fef30551a293cc7147932..f374abfb7f1f88fff0d7db9391130151a1611000 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, } else { u32 producer_index; void *pi_virt = circularQ->pi_virt; + /* spurious interrupt during setup if + * kexec-ing and driver doing a doorbell access + * with the pre-kexec oq interrupt setup + */ + if (!pi_virt) + break; /* Update the producer index from SPC */ producer_index = pm8001_read_32(pi_virt); circularQ->producer_index = cpu_to_le32(producer_index); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index ce584c31d36e505a005b2e09aab8ccbaa6e4025c..e64a13f0bce17dee4debc8e62d397ffd685af369 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -374,6 +374,13 @@ static int pm8001_task_exec(struct sas_task *task, return 0; } pm8001_ha = pm8001_find_ha_by_dev(task->dev); + if (pm8001_ha->controller_fatal_error) { + struct task_status_struct *ts = &t->task_status; + + ts->resp = SAS_TASK_UNDELIVERED; + t->task_done(t); + return 0; + } PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); spin_lock_irqsave(&pm8001_ha->lock, flags); do { @@ -466,7 +473,7 @@ static int pm8001_task_exec(struct sas_task *task, dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); if (!sas_protocol_ata(t->task_proto)) if (n_elem) - dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, + dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter, t->data_dir); out_done: spin_unlock_irqrestore(&pm8001_ha->lock, flags); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 6628cc38316c2ef5871ef81db4d9fe772e402fee..d8768ac41ebbf3d9b49a7510578fdd4e940038fe 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -531,6 +531,7 @@ struct pm8001_hba_info { u32 logging_level; u32 fw_status; u32 smp_exp_mode; + bool controller_fatal_error; const struct firmware *fw_image; struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; }; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index eb4fee61df729f700c8db790a632b9fe3b146324..9edd61c063a1ab695511541f2c422b3817f9de55 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -572,6 +572,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + /* Update Fatal error interrupt vector */ + 
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + ((pm8001_ha->number_of_intr - 1) << 8); pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, @@ -1099,6 +1102,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) return -EBUSY; } + /* Initialize the controller fatal error flag */ + pm8001_ha->controller_fatal_error = false; + /* Initialize pci space address eg: mpi offset */ init_pci_device_addresses(pm8001_ha); init_default_table_values(pm8001_ha); @@ -1207,13 +1213,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) u32 bootloader_state; u32 ibutton0, ibutton1; - /* Check if MPI is in ready state to reset */ - if (mpi_uninit_check(pm8001_ha) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("MPI state is not ready\n")); - return -1; + /* Process MPI table uninitialization only if FW is ready */ + if (!pm8001_ha->controller_fatal_error) { + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "MPI state is not ready scratch1 :0x%x\n", + regval)); + return -1; + } } - /* checked for reset register normal state; 0x0 */ regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); PM8001_INIT_DBG(pm8001_ha, @@ -3717,6 +3727,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } +static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha) +{ + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6))); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7))); +} + static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; @@ -3724,10 +3774,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) u8 uninitialized_var(bc); u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; + u32 regval; + if (vec == (pm8001_ha->number_of_intr - 1)) { + regval = pm8001_cr32(pm8001_ha, 0, 
MSGU_SCRATCH_PAD_1); + if ((regval & SCRATCH_PAD_MIPSALL_READY) != + SCRATCH_PAD_MIPSALL_READY) { + pm8001_ha->controller_fatal_error = true; + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "Firmware Fatal error! Regval:0x%x\n", regval)); + print_scratchpad_registers(pm8001_ha); + return ret; + } + } spin_lock_irqsave(&pm8001_ha->lock, flags); circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { + /* spurious interrupt during setup if kexec-ing and + * driver doing a doorbell access w/ the pre-kexec oq + * interrupt setup. + */ + if (!circularQ->pi_virt) + break; ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 7a443bad61634ded2f99f9d44c0bebca856ea89a..411b414a9a0efa8fa26c3559bc3c1a916145894f 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -1288,6 +1288,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; #define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 #define SCRATCH_PAD_IOP0_READY 0xC00 #define SCRATCH_PAD_IOP1_READY 0x3000 +#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \ + SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_RAAE_READY) /* boot loader state */ #define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 4a6e086279f9a8c00a4f52f627f8ad55c3f4ae4e..33e4dceb895f2c4a0961ad071aa2d1267b898f5b 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -252,7 +252,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); uint16_t nextlid = 0; if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { @@ -426,7 +426,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job) struct Scsi_Host *host = bsg_job->shost; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); int req_sg_cnt, rsp_sg_cnt; uint16_t loop_id; struct fc_port *fcport; @@ -1911,7 +1911,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) struct Scsi_Host *host = bsg_job->shost; scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); struct qla_mt_iocb_rqst_fx00 *piocb_rqst; srb_t *sp; int req_sg_cnt = 0, rsp_sg_cnt = 0; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c813c9b75a10bd6221ab0007d103c8d46d251acd..3bae56b202f8789ef0e994c879502fd75abade11 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3077,6 +3077,10 @@ qla2x00_shutdown(struct pci_dev *pdev) /* Stop currently executing firmware. 
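The pm80xx fatal-error detection above requires every ready bit, not merely some of them, which is why the comparison is written against the full mask. A stand-alone illustration (the IOP0/IOP1 values come from the pm80xx_hwi.h hunk above; the RAAE value here is only illustrative):

#include <stdio.h>

#define IOP0_READY    0x0C00
#define IOP1_READY    0x3000
#define RAAE_READY    0x0003                    /* illustrative value */
#define MIPSALL_READY (IOP1_READY | IOP0_READY | RAAE_READY)

int main(void)
{
        unsigned int regval = IOP0_READY | RAAE_READY;  /* IOP1 not up yet */

        /* "(regval & mask) == mask" demands all processors ready; a bare
         * "regval & mask" would already be non-zero here and miss the fault. */
        printf("all ready:  %s\n",
               (regval & MIPSALL_READY) == MIPSALL_READY ? "yes" : "no");
        printf("some ready: %s\n", (regval & MIPSALL_READY) ? "yes" : "no");
        return 0;
}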
*/ qla2x00_try_to_stop_firmware(vha); + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + /* Turn adapter off line */ vha->flags.online = 0; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 11f45cb998927b1fe655c382b0a160d8a57fd32a..d13e91e164258ccce111edd643dd57526c88a0f4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -572,6 +572,7 @@ static void qlt_free_session_done(struct work_struct *work) if (logout_started) { bool traced = false; + u16 cnt = 0; while (!ACCESS_ONCE(sess->logout_completed)) { if (!traced) { @@ -581,6 +582,9 @@ static void qlt_free_session_done(struct work_struct *work) traced = true; } msleep(100); + cnt++; + if (cnt > 200) + break; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c index bd70339c1242eb1f2515a7a947b007472823a3c8..03d9855a6afd71ef5893b37623dc27c39db1cdbc 100644 --- a/drivers/scsi/scsi_logging.c +++ b/drivers/scsi/scsi_logging.c @@ -16,57 +16,15 @@ #include #include -#define SCSI_LOG_SPOOLSIZE 4096 - -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG -#warning SCSI logging bitmask too large -#endif - -struct scsi_log_buf { - char buffer[SCSI_LOG_SPOOLSIZE]; - unsigned long map; -}; - -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); - static char *scsi_log_reserve_buffer(size_t *len) { - struct scsi_log_buf *buf; - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; - unsigned long idx = 0; - - preempt_disable(); - buf = this_cpu_ptr(&scsi_format_log); - idx = find_first_zero_bit(&buf->map, map_bits); - if (likely(idx < map_bits)) { - while (test_and_set_bit(idx, &buf->map)) { - idx = find_next_zero_bit(&buf->map, map_bits, idx); - if (idx >= map_bits) - break; - } - } - if (WARN_ON(idx >= map_bits)) { - preempt_enable(); - return NULL; - } - *len = SCSI_LOG_BUFSIZE; - return buf->buffer + idx * SCSI_LOG_BUFSIZE; + *len = 128; + return kmalloc(*len, GFP_ATOMIC); } static void scsi_log_release_buffer(char *bufptr) { - struct scsi_log_buf *buf; - unsigned long idx; - int ret; - - buf = this_cpu_ptr(&scsi_format_log); - if (bufptr >= buf->buffer && - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; - ret = test_and_clear_bit(idx, &buf->map); - WARN_ON(!ret); - } - preempt_enable(); + kfree(bufptr); } static inline const char *scmd_name(const struct scsi_cmnd *scmd) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5294fa31546bedf5d097a94786c79664deea9b81..522d118f5b8507ec5ff717b07ec8eaa8fb8f1bdb 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -710,6 +710,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; + struct scsi_device *sdev = to_scsi_device(dev); + + /* + * We need to try to get module, avoiding the module been removed + * during delete. + */ + if (scsi_device_get(sdev)) + return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); @@ -724,9 +732,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, * state into SDEV_DEL. 
*/ device_remove_file(dev, attr); - scsi_remove_device(to_scsi_device(dev)); + scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); + scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 76278072147e20ca30444cd5d61fb4a3f5c6f124..b0f5220ae23a8b62213551b8e5336fecd351286e 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev) base = res->start; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); - if (!hostdata) { - dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); + if (!hostdata) return -ENOMEM; - } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index c6425e3df5a04958dca96430eb507ad13684b76b..f1c7714377524d0aafe143bf7d3b791be3e3fa02 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -4371,6 +4371,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym OUTB(np, HS_PRT, HS_BUSY); } +#define sym_printk(lvl, tp, cp, fmt, v...) do { \ + if (cp) \ + scmd_printk(lvl, cp->cmd, fmt, ##v); \ + else \ + starget_printk(lvl, tp->starget, fmt, ##v); \ +} while (0) + /* * chip exception handler for programmed interrupts. */ @@ -4416,7 +4423,7 @@ static void sym_int_sir(struct sym_hcb *np) * been selected with ATN. We do not want to handle that. */ case SIR_SEL_ATN_NO_MSG_OUT: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No MSG OUT phase after selection with ATN\n"); goto out_stuck; /* @@ -4424,7 +4431,7 @@ static void sym_int_sir(struct sym_hcb *np) * having reselected the initiator. */ case SIR_RESEL_NO_MSG_IN: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No MSG IN phase after reselection\n"); goto out_stuck; /* @@ -4432,7 +4439,7 @@ static void sym_int_sir(struct sym_hcb *np) * an IDENTIFY. 
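The sym_printk() wrapper above exists because the interrupt path can run with no command attached (cp == NULL), where the old scmd_printk() calls would dereference a NULL pointer. The same fall-back shape in plain C, with made-up structures:

#include <stdio.h>

struct cmd    { const char *id; };
struct target { const char *id; };

/* Prefer the per-command prefix, fall back to the target when cp is NULL. */
#define ex_printk(tp, cp, fmt, ...) do {                                \
        if (cp)                                                         \
                printf("[cmd %s] " fmt, (cp)->id, ##__VA_ARGS__);       \
        else                                                            \
                printf("[tgt %s] " fmt, (tp)->id, ##__VA_ARGS__);       \
} while (0)

int main(void)
{
        struct target t = { "0:0:1:0" };
        struct cmd *c = NULL;

        ex_printk(&t, c, "No MSG IN phase after reselection\n");
        return 0;
}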
*/ case SIR_RESEL_NO_IDENTIFY: - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "No IDENTIFY after reselection\n"); goto out_stuck; /* @@ -4461,7 +4468,7 @@ static void sym_int_sir(struct sym_hcb *np) case SIR_RESEL_ABORTED: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; - scmd_printk(KERN_WARNING, cp->cmd, + sym_printk(KERN_WARNING, tp, cp, "message %x sent on bad reselection\n", np->lastmsg); goto out; /* diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2f61bd8cfd02fcda37c692701d3d00b221a0906d..080fbedfefe41669e19d37592211360b14c3ec3a 100755 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -9964,6 +9964,9 @@ int ufshcd_shutdown(struct ufs_hba *hba) { int ret = 0; + if (!hba->is_powered) + goto out; + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index a47cf638460a6bf7e2665d410192552cce6b2ad0..ccb6f98550da402754a67b78e61e77aab76b2f58 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -298,7 +298,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi *mcspi; unsigned int wcnt; - int max_fifo_depth, fifo_depth, bytes_per_word; + int max_fifo_depth, bytes_per_word; u32 chconf, xferlevel; mcspi = spi_master_get_devdata(master); @@ -314,10 +314,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, else max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; - fifo_depth = gcd(t->len, max_fifo_depth); - if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0) - goto disable_fifo; - wcnt = t->len / bytes_per_word; if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) goto disable_fifo; @@ -325,16 +321,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, xferlevel = wcnt << 16; if (t->rx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFER; - xferlevel |= (fifo_depth - 1) << 8; + xferlevel |= (bytes_per_word - 1) << 8; } + if (t->tx_buf != NULL) { chconf |= OMAP2_MCSPI_CHCONF_FFET; - xferlevel |= fifo_depth - 1; + xferlevel |= bytes_per_word - 1; } mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel); mcspi_write_chconf0(spi, chconf); - mcspi->fifo_depth = fifo_depth; + mcspi->fifo_depth = max_fifo_depth; return; } @@ -601,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) struct dma_slave_config cfg; enum dma_slave_buswidth width; unsigned es; - u32 burst; void __iomem *chstat_reg; void __iomem *irqstat_reg; int wait_res; @@ -623,22 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) } count = xfer->len; - burst = 1; - - if (mcspi->fifo_depth > 0) { - if (count > mcspi->fifo_depth) - burst = mcspi->fifo_depth / es; - else - burst = count / es; - } memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; cfg.src_addr_width = width; cfg.dst_addr_width = width; - cfg.src_maxburst = burst; - cfg.dst_maxburst = burst; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; rx = xfer->rx_buf; tx = xfer->tx_buf; diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index fefb688a34328f863953b106052d1e9589e526f6..2f4df804c4d807eb2c3092882fe7a340d8d5f4e0 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -320,7 +320,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s, desc_rx = dmaengine_prep_slave_sg(master->dma_rx, xfer->rx_sg.sgl, xfer->rx_sg.nents, - 
DMA_FROM_DEVICE, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { ret = -EINVAL; @@ -330,7 +330,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s, desc_tx = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl, xfer->tx_sg.nents, - DMA_TO_DEVICE, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { ret = -EINVAL; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 0f89c2169c244e433a36e7cc7e672d385d3c6560..3a94f465e8e05eb46e197fcf0e31758c6fdfaf90 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -443,6 +443,9 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs) struct dma_slave_config rxconf, txconf; struct dma_async_tx_descriptor *rxdesc, *txdesc; + memset(&rxconf, 0, sizeof(rxconf)); + memset(&txconf, 0, sizeof(txconf)); + spin_lock_irqsave(&rs->lock, flags); rs->state &= ~RXBUSY; rs->state &= ~TXBUSY; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 711ea523b3251fad92bd02a249051c389382488f..8a69148a962a8c6cec964631414b1b68cd7eae1a 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1198,8 +1198,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) i = platform_get_irq(pdev, 0); if (i < 0) { - dev_err(&pdev->dev, "cannot get platform IRQ\n"); - ret = -ENOENT; + dev_err(&pdev->dev, "cannot get IRQ\n"); + ret = i; goto err1; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index c7d3fa9125d2c91d6701cdc5d394d12f4086cb2c..0602366e646968d23baa1edd7d956f3515907d03 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -752,11 +752,9 @@ static int spidev_probe(struct spi_device *spi) * compatible string, it is a Linux implementation thing * rather than a description of the hardware. 
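Two small points in the SPI hunks above are worth spelling out: dmaengine_prep_slave_sg() takes an enum dma_transfer_direction (DMA_DEV_TO_MEM / DMA_MEM_TO_DEV), which is what the pic32 change corrects, and a stack-allocated dma_slave_config must be zeroed before use, which is what the rockchip change adds. A sketch of the latter with driver-agnostic, hypothetical variable names:

        struct dma_slave_config rxconf;

        memset(&rxconf, 0, sizeof(rxconf));     /* no stale stack contents */
        rxconf.direction      = DMA_DEV_TO_MEM;
        rxconf.src_addr       = fifo_phys_addr;              /* hypothetical */
        rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        rxconf.src_maxburst   = 1;
        dmaengine_slave_config(rx_chan, &rxconf);            /* hypothetical channel */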
*/ - if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) { - dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n"); - WARN_ON(spi->dev.of_node && - !of_match_device(spidev_dt_ids, &spi->dev)); - } + WARN(spi->dev.of_node && + of_device_is_compatible(spi->dev.of_node, "spidev"), + "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node); spidev_probe_acpi(spi); diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 8a40f2e88b643635ba65c515c917a65cd33ee423..39738229ed2665bd0ebef1b5e6c22da40bf6e12a 100755 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -769,12 +769,15 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; + rcu_read_unlock(); /* give the system time to free up the memory */ msleep_interruptible(20); trace_almk_shrink(selected_tasksize, ret, other_free, other_file, selected_oom_score_adj); + + get_task_struct(selected); } else { trace_almk_shrink(1, ret, other_free, other_file, 0); rcu_read_unlock(); diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 608403c7586b767e4bb264300dc216d1fa0d785a..f0572d6a5f63792e9c6b1e39a2b611774ca6e602 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk + * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,7 +17,7 @@ * Description: University of Stirling USB DAQ & INCITE Technology Limited * Devices: [ITL] USB-DUX-FAST (usbduxfast) * Author: Bernd Porr - * Updated: 10 Oct 2014 + * Updated: 16 Nov 2019 * Status: stable */ @@ -31,6 +31,7 @@ * * * Revision history: + * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest * 0.9: Dropping the first data packet which seems to be from the last transfer. * Buffer overflows in the FX2 are handed over to comedi. * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. 
@@ -359,6 +360,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, struct comedi_cmd *cmd) { int err = 0; + int err2 = 0; unsigned int steps; unsigned int arg; @@ -408,11 +410,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, */ steps = (cmd->convert_arg * 30) / 1000; if (cmd->chanlist_len != 1) - err |= comedi_check_trigger_arg_min(&steps, - MIN_SAMPLING_PERIOD); - err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); - arg = (steps * 1000) / 30; - err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + err2 |= comedi_check_trigger_arg_min(&steps, + MIN_SAMPLING_PERIOD); + else + err2 |= comedi_check_trigger_arg_min(&steps, 1); + err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); + if (err2) { + err |= err2; + arg = (steps * 1000) / 30; + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); + } if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 587f68aa466c2ee41c32e7d2822874eaaae91986..d9ba8c0f1353b37aba2c6b8b66ec56a63a04632d 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -247,7 +247,7 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par) static int fbtft_backlight_update_status(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); - bool polarity = !!(bd->props.state & BL_CORE_DRIVER1); + bool polarity = par->polarity; fbtft_par_dbg(DEBUG_BACKLIGHT, par, "%s: polarity=%d, power=%d, fb_blank=%d\n", @@ -296,7 +296,7 @@ void fbtft_register_backlight(struct fbtft_par *par) /* Assume backlight is off, get polarity from current state of pin */ bl_props.power = FB_BLANK_POWERDOWN; if (!gpio_get_value(par->gpio.led[0])) - bl_props.state |= BL_CORE_DRIVER1; + par->polarity = true; bd = backlight_device_register(dev_driver_string(par->info->device), par->info->device, par, &fbtft_bl_ops, &bl_props); @@ -814,7 +814,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, if (par->gamma.curves && gamma) { if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma, strlen(gamma))) - goto alloc_fail; + goto release_framebuf; } /* Transmit buffer */ @@ -839,7 +839,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL); } if (!txbuf) - goto alloc_fail; + goto release_framebuf; par->txbuf.buf = txbuf; par->txbuf.len = txbuflen; } @@ -875,6 +875,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, return info; +release_framebuf: + framebuffer_release(info); + alloc_fail: vfree(vmem); diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 89c4b5b76ce69d78c11051868958eaf6c0b0e084..0275319906748074fe6cf922719482610a8fd52e 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -241,6 +241,7 @@ struct fbtft_par { ktime_t update_time; bool bgr; void *extra; + bool polarity; }; #define NUMARGS(...) 
(sizeof((int[]){__VA_ARGS__})/sizeof(int)) diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index d22360849b883150cebe2fe653b3d62c625b40ca..d4a7d740fc620188e75034276374b823dd05c304 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -366,8 +366,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj, } padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); - if (!padapter->HalData) - DBG_88E("cant not alloc memory for HAL DATA\n"); + if (!padapter->HalData) { + DBG_88E("Failed to allocate memory for HAL data\n"); + goto free_adapter; + } padapter->intf_start = &usb_intf_start; padapter->intf_stop = &usb_intf_stop; diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 22e5116e74f89dac9b8338fcdfa175e3ee04c362..bcdbf38b6916f1b769ccf10a87368d8d9f026a3c 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1673,8 +1673,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) priv->hw->max_signal = 100; - if (vnt_init(priv)) + if (vnt_init(priv)) { + device_free_info(priv); return -ENODEV; + } device_print_info(priv); pci_set_drvdata(pcid, priv); diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 165c46f601dc0906c9a22cc126c607602455223e..a84b318ac899443497f5e7f334a7cc56701bc294 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -489,10 +489,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, /* Set the encryption - we only support wep */ if (is_wep) { if (sme->key) { - if (sme->key_idx >= NUM_WEPKEYS) { - err = -EINVAL; - goto exit; - } + if (sme->key_idx >= NUM_WEPKEYS) + return -EINVAL; result = prism2_domibset_uint32(wlandev, DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index cc38a3509f78ca26f966b521c7207e9d45a9c869..c3d576ed6f13501361a0c6f823c10b0547289310 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1046,27 +1046,6 @@ passthrough_parse_cdb(struct se_cmd *cmd, { unsigned char *cdb = cmd->t_task_cdb; - /* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. - */ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } - /* * For REPORT LUNS we always need to emulate the response, for everything * else, pass it up. 
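The rtl8188eu and vt6655 hunks above share one theme: when a probe-time allocation or init step fails, unwind and return instead of logging and carrying on with a half-initialized adapter. A generic sketch with hypothetical names:

static struct foo *foo_probe_alloc(void)
{
        struct foo *f;

        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                return NULL;

        f->hal = kzalloc(sizeof(*f->hal), GFP_KERNEL);
        if (!f->hal)
                goto free_foo;          /* do not continue with a NULL pointer */

        return f;

free_foo:
        kfree(f);
        return NULL;
}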
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 73e5fee6cf1d55ddd67729180564f06559da8b59..83126e2dce36d10b30a7e44b6cc2ed281e983296 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -401,8 +401,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data) rcar_thermal_for_each_priv(priv, common) { if (rcar_thermal_had_changed(priv, status)) { rcar_thermal_irq_disable(priv); - schedule_delayed_work(&priv->work, - msecs_to_jiffies(300)); + queue_delayed_work(system_freezable_wq, &priv->work, + msecs_to_jiffies(300)); } } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 691908225bed52d6b945fbd71a97913f07a6e493..5b6475a8804e6725d9c58e44d324efa28a4e5ad6 100755 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -2373,7 +2373,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_unlock(&thermal_list_lock); - thermal_zone_device_set_polling(NULL, tz, 0); + cancel_delayed_work_sync(&tz->poll_queue); if (tz->type[0]) device_remove_file(&tz->device, &dev_attr_type); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index cba6bc6ab9ed7624a404e577d2e8dde8059fa8ab..c963593eedbe7e76bbae81eec180ddf69d7a1d03 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -95,9 +95,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring) return io; } -static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) +static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) { - iowrite16(value, ring_desc_base(ring) + offset); + /* + * The other 16-bits in the register is read-only and writes to it + * are ignored by the hardware so we can save one ioread32() by + * filling the read-only bits with zeroes. + */ + iowrite32(cons, ring_desc_base(ring) + 8); +} + +static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) +{ + /* See ring_iowrite_cons() above for explanation */ + iowrite32(prod << 16, ring_desc_base(ring) + 8); } static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) @@ -149,7 +160,10 @@ static void ring_write_descriptors(struct tb_ring *ring) descriptor->sof = frame->sof; } ring->head = (ring->head + 1) % ring->size; - ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); + if (ring->is_tx) + ring_iowrite_prod(ring, ring->head); + else + ring_iowrite_cons(ring, ring->head); } } @@ -369,7 +383,7 @@ void ring_stop(struct tb_ring *ring) ring_iowrite32options(ring, 0, 0); ring_iowrite64desc(ring, 0, 0); - ring_iowrite16desc(ring, 0, ring->is_tx ? 
10 : 8); + ring_iowrite32desc(ring, 0, 8); ring_iowrite32desc(ring, 0, 12); ring->head = 0; ring->tail = 0; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index ef688aadb03259f963cd3361a9d6fcf64a5ad292..578242239daada7832bd877b78107cc59687a297 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1279,7 +1279,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) atmel_port->hd_start_rx = false; atmel_start_rx(port); - return; } atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 79cad3ff843a65cd3f35060c4612616d9e9fb33c..84f9b5b031e09f9e77ddd2765c915f5c0359d792 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -176,6 +176,7 @@ struct msm_geni_serial_port { int edge_count; bool manual_flow; struct msm_geni_serial_ver_info ver_info; + u32 cur_tx_remaining; }; static const struct uart_ops msm_geni_serial_pops; @@ -781,6 +782,8 @@ static void msm_geni_serial_console_write(struct console *co, const char *s, struct msm_geni_serial_port *port; bool locked = true; unsigned long flags; + unsigned int geni_status; + int irq_en; WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS); @@ -794,6 +797,8 @@ static void msm_geni_serial_console_write(struct console *co, const char *s, else spin_lock_irqsave(&uport->lock, flags); + geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); + /* Cancel the current write to log the fault */ if (!locked) { geni_cancel_m_cmd(uport->membase); @@ -807,9 +812,27 @@ static void msm_geni_serial_console_write(struct console *co, const char *s, } writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + } else if ((geni_status & M_GENI_CMD_ACTIVE) && + !port->cur_tx_remaining) { + /* It seems we can interrupt existing transfers unless all data + * has been sent, in which case we need to look for done first. + */ + msm_geni_serial_poll_cancel_tx(uport); + + /* Enable WATERMARK interrupt for every new console write op */ + if (uart_circ_chars_pending(&uport->state->xmit)) { + irq_en = geni_read_reg_nolog(uport->membase, + SE_GENI_M_IRQ_EN); + geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN, + uport->membase, SE_GENI_M_IRQ_EN); + } } __msm_geni_serial_console_write(uport, s, count); + + if (port->cur_tx_remaining) + msm_geni_serial_setup_tx(uport, port->cur_tx_remaining); + if (locked) spin_unlock_irqrestore(&uport->lock, flags); } @@ -1275,42 +1298,62 @@ static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx) return ret; } -static int msm_geni_serial_handle_tx(struct uart_port *uport) +static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done, + bool active) { int ret = 0; struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); struct circ_buf *xmit = &uport->state->xmit; unsigned int avail_fifo_bytes = 0; unsigned int bytes_remaining = 0; + unsigned int pending; int i = 0; unsigned int tx_fifo_status; unsigned int xmit_size; unsigned int fifo_width_bytes = (uart_console(uport) ? 
1 : (msm_port->tx_fifo_width >> 3)); int temp_tail = 0; + int irq_en; - xmit_size = uart_circ_chars_pending(xmit); tx_fifo_status = geni_read_reg_nolog(uport->membase, SE_GENI_TX_FIFO_STATUS); - /* Both FIFO and framework buffer are drained */ - if (!xmit_size && !tx_fifo_status) { + + /* Complete the current tx command before taking newly added data */ + if (active) + pending = msm_port->cur_tx_remaining; + else + pending = uart_circ_chars_pending(xmit); + + /* All data has been transmitted and acknowledged as received */ + if (!pending && !tx_fifo_status && done) { msm_geni_serial_stop_tx(uport); goto exit_handle_tx; } - avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) * - fifo_width_bytes; - temp_tail = xmit->tail & (UART_XMIT_SIZE - 1); + avail_fifo_bytes = msm_port->tx_fifo_depth - (tx_fifo_status & + TX_FIFO_WC); + avail_fifo_bytes *= fifo_width_bytes; + if (avail_fifo_bytes < 0) + avail_fifo_bytes = 0; - if (xmit_size > (UART_XMIT_SIZE - temp_tail)) - xmit_size = (UART_XMIT_SIZE - temp_tail); - if (xmit_size > avail_fifo_bytes) - xmit_size = avail_fifo_bytes; + temp_tail = xmit->tail; + xmit_size = min(avail_fifo_bytes, pending); if (!xmit_size) goto exit_handle_tx; - msm_geni_serial_setup_tx(uport, xmit_size); + if (!msm_port->cur_tx_remaining) { + msm_geni_serial_setup_tx(uport, pending); + msm_port->cur_tx_remaining = pending; + + /* Re-Enable WATERMARK interrupt while starting new transfer */ + irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); + if (!(irq_en & M_TX_FIFO_WATERMARK_EN)) + geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN, + uport->membase, SE_GENI_M_IRQ_EN); + } + bytes_remaining = xmit_size; + while (i < xmit_size) { unsigned int tx_bytes; unsigned int buf = 0; @@ -1319,20 +1362,39 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport) tx_bytes = ((bytes_remaining < fifo_width_bytes) ? bytes_remaining : fifo_width_bytes); - for (c = 0; c < tx_bytes ; c++) - buf |= (xmit->buf[temp_tail + c] << (c * 8)); + for (c = 0; c < tx_bytes ; c++) { + buf |= (xmit->buf[temp_tail++] << (c * 8)); + temp_tail &= UART_XMIT_SIZE - 1; + } + geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn); + i += tx_bytes; bytes_remaining -= tx_bytes; uport->icount.tx += tx_bytes; - temp_tail += tx_bytes; + msm_port->cur_tx_remaining -= tx_bytes; /* Ensure FIFO write goes through */ wmb(); } - xmit->tail = temp_tail & (UART_XMIT_SIZE - 1); - if (uart_console(uport)) - msm_geni_serial_poll_cancel_tx(uport); + xmit->tail = temp_tail; + + /* + * The tx fifo watermark is level triggered and latched. Though we had + * cleared it in qcom_geni_serial_isr it will have already reasserted + * so we must clear it again here after our writes. 
+ */ + geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase, + SE_GENI_M_IRQ_CLEAR); + exit_handle_tx: + if (!msm_port->cur_tx_remaining) { + /* Clear WATERMARK interrupt for each transfer completion */ + irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); + if (irq_en & M_TX_FIFO_WATERMARK_EN) + geni_write_reg_nolog(irq_en & ~M_TX_FIFO_WATERMARK_EN, + uport->membase, SE_GENI_M_IRQ_EN); + } + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(uport); return ret; @@ -1428,6 +1490,7 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) struct uart_port *uport = dev; unsigned long flags; unsigned int m_irq_en; + unsigned int geni_status; struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); struct tty_port *tport = &uport->state->port; bool drop_rx = false; @@ -1449,6 +1512,7 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN); dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT); dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT); + geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS); geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR); geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR); @@ -1468,8 +1532,10 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev) if (!dma) { if ((m_irq_status & m_irq_en) & - (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN)) - msm_geni_serial_handle_tx(uport); + (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN)) + msm_geni_serial_handle_tx(uport, + m_irq_status & M_CMD_DONE_EN, + geni_status & M_GENI_CMD_ACTIVE); if ((s_irq_status & S_GP_IRQ_0_EN) || (s_irq_status & S_GP_IRQ_1_EN)) { diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 1d9d778828baed6c15e3837b6a6a03a0ad4d5d6c..515bf18c829434d9a8cd635e5577544fe884f4c4 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1635,8 +1635,9 @@ static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s) /* * If something went wrong, rollback. + * Be careful: i may be unsigned. */ - while (err && (--i >= 0)) + while (err && (i-- > 0)) if (irq[i] >= 0) free_irq(irq[i], s); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 82451bb6622bde81cdd464d9428d2d49ab8ea0f2..f80a88d107d7fb72cf47790f6833ab60fe8fbeb5 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -332,6 +332,7 @@ struct sc16is7xx_port { struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work irq_work; + struct mutex efr_lock; struct sc16is7xx_one p[0]; }; @@ -503,6 +504,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) div /= 4; } + /* In an amazing feat of design, the Enhanced Features Register shares + * the address of the Interrupt Identification Register, and is + * switched in by writing a magic value (0xbf) to the Line Control + * Register. Any interrupt firing during this time will see the EFR + * where it expects the IIR to be, leading to "Unexpected interrupt" + * messages. + * + * Prevent this possibility by claiming a mutex while accessing the + * EFR, and claiming the same mutex from within the interrupt handler. + * This is similar to disabling the interrupt, but that doesn't work + * because the bulk of the interrupt processing is run as a workqueue + * job in thread context. 
+ */ + mutex_lock(&s->efr_lock); + lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); /* Open the LCR divisors for configuration */ @@ -518,6 +534,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) /* Put LCR back to the normal mode */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + mutex_unlock(&s->efr_lock); + sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_CLKSEL_BIT, prescaler); @@ -700,6 +718,8 @@ static void sc16is7xx_ist(struct kthread_work *ws) { struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work); + mutex_lock(&s->efr_lock); + while (1) { bool keep_polling = false; int i; @@ -709,6 +729,8 @@ static void sc16is7xx_ist(struct kthread_work *ws) if (!keep_polling) break; } + + mutex_unlock(&s->efr_lock); } static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) @@ -903,6 +925,9 @@ static void sc16is7xx_set_termios(struct uart_port *port, if (!(termios->c_cflag & CREAD)) port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK; + /* As above, claim the mutex while accessing the EFR. */ + mutex_lock(&s->efr_lock); + sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B); @@ -924,6 +949,8 @@ static void sc16is7xx_set_termios(struct uart_port *port, /* Update LCR register */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + mutex_unlock(&s->efr_lock); + /* Get baud rate generator configuration */ baud = uart_get_baud_rate(port, termios, old, port->uartclk / 16 / 4 / 0xffff, @@ -1186,6 +1213,7 @@ static int sc16is7xx_probe(struct device *dev, s->regmap = regmap; s->devtype = devtype; dev_set_drvdata(dev, s); + mutex_init(&s->efr_lock); kthread_init_worker(&s->kworker); kthread_init_work(&s->irq_work, sc16is7xx_ist); diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index d2da6aa7f27d0eb1ccb4c874711446e3952e5367..1bb15edcf1e774d6bed3c2b4b4178540abfda3f6 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -68,6 +68,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set); struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, enum mctrl_gpio_idx gidx) { + if (gpios == NULL) + return NULL; + return gpios->gpio[gidx]; } EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 747560feb63e9a6ca7dfbac8bd591769730bb327..2e34239ac8a911550e1446a58a8d90bedf52f939 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port) if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE | SPRD_LSR_FE | SPRD_LSR_OE)) - if (handle_lsr_errors(port, &lsr, &flag)) + if (handle_lsr_errors(port, &flag, &lsr)) continue; if (uart_handle_sysrq_char(port, ch)) continue; diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 817bb0d3f326ed982631c135ddd5f7a4b170b7a5..9a54aafe840506c518b15a42232ae234339dc773 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -746,7 +746,8 @@ static int __init ulite_init(void) static void __exit ulite_exit(void) { platform_driver_unregister(&ulite_platform_driver); - uart_unregister_driver(&ulite_uart_driver); + if (ulite_uart_driver.state) + uart_unregister_driver(&ulite_uart_driver); } module_init(ulite_init); diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 7aca2d4670e4a3aaf3636acc5f8dd1c3f288bfd3..e645ee1cfd989563c5e58d09b287c683873f447a 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ 
-1187,14 +1187,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slgt_info *info = tty->driver_data; - int rc = -ENOIOCTLCMD; + int rc; if (sanity_check(info, tty->name, "compat_ioctl")) return -ENODEV; DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd)); switch (cmd) { - case MGSL_IOCSPARAMS32: rc = set_params32(info, compat_ptr(arg)); break; @@ -1214,18 +1213,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty, case MGSL_IOCWAITGPIO: case MGSL_IOCGXSYNC: case MGSL_IOCGXCTRL: - case MGSL_IOCSTXIDLE: - case MGSL_IOCTXENABLE: - case MGSL_IOCRXENABLE: - case MGSL_IOCTXABORT: - case TIOCMIWAIT: - case MGSL_IOCSIF: - case MGSL_IOCSXSYNC: - case MGSL_IOCSXCTRL: - rc = ioctl(tty, cmd, arg); + rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); break; + default: + rc = ioctl(tty, cmd, arg); } - DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc)); return rc; } diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index f36a1ac3bfbdfc0861ebebd9fd4114d90452b03a..b8650210be0f765ddf296ebc6fb1d186cb351880 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -206,14 +206,17 @@ static void ci_otg_work(struct work_struct *work) } pm_runtime_get_sync(ci->dev); + if (ci->id_event) { ci->id_event = false; ci_handle_id_switch(ci); - } else if (ci->b_sess_valid_event) { + } + + if (ci->b_sess_valid_event) { ci->b_sess_valid_event = false; ci_handle_vbus_change(ci); - } else - dev_err(ci->dev, "unexpected event occurs at %s\n", __func__); + } + pm_runtime_put_sync(ci->dev); enable_irq(ci->irq); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 20d02a5e418d936231fcb091e183c6f939d16701..c6577f489d0f6d7bd9ed006545c8d30eb89933d7 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -273,6 +273,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data) } else if (data->oc_polarity == 1) { /* High active */ reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY); + } else { + reg &= ~(MX6_BM_OVER_CUR_DIS); } writel(reg, usbmisc->base + data->index * 4); diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 071964c7847f1fbf148c2cb485fe0750e8518adc..07c3c3449147fa6b7c1afbb171c27e8fb2128786 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -458,6 +458,7 @@ static void usblp_cleanup(struct usblp *usblp) kfree(usblp->readbuf); kfree(usblp->device_id_string); kfree(usblp->statusbuf); + usb_put_intf(usblp->intf); kfree(usblp); } @@ -474,10 +475,12 @@ static int usblp_release(struct inode *inode, struct file *file) mutex_lock(&usblp_mutex); usblp->used = 0; - if (usblp->present) { + if (usblp->present) usblp_unlink_urbs(usblp); - usb_autopm_put_interface(usblp->intf); - } else /* finish cleanup from disconnect */ + + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); return 0; @@ -1118,7 +1121,7 @@ static int usblp_probe(struct usb_interface *intf, init_waitqueue_head(&usblp->wwait); init_usb_anchor(&usblp->urbs); usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; - usblp->intf = intf; + usblp->intf = usb_get_intf(intf); /* Malloc device ID string buffer to the largest expected length, * since we can re-query it on an ioctl and a dynamic string @@ -1207,6 +1210,7 @@ static int usblp_probe(struct usb_interface *intf, kfree(usblp->readbuf); 
kfree(usblp->statusbuf); kfree(usblp->device_id_string); + usb_put_intf(usblp->intf); kfree(usblp); abort_ret: return retval; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 2476d660319e7c1c367721dfc7b1aaf0f90129d4..79ae360e7742ff2e91869b808315ac8d5948822a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -343,6 +343,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, /* Validate the wMaxPacketSize field */ maxp = usb_endpoint_maxp(&endpoint->desc); + if (maxp == 0) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ @@ -920,7 +925,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; struct usb_ssp_cap_descriptor *ssp_cap; - unsigned char *buffer; + unsigned char *buffer, *buffer0; int length, total_len, num, i, ssac; __u8 cap_type; int ret; @@ -965,10 +970,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) ret = -ENOMSG; goto err; } + + buffer0 = buffer; total_len -= length; + buffer += length; for (i = 0; i < num; i++) { - buffer += length; cap = (struct usb_dev_cap_header *)buffer; if (total_len < sizeof(*cap) || total_len < cap->bLength) { @@ -982,8 +989,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) break; } - total_len -= length; - if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { dev_warn(ddev, "descriptor type invalid, skip\n"); continue; @@ -1027,7 +1032,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) default: break; } + + total_len -= length; + buffer += length; } + dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); return 0; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index ec056a414091acb4c8a853321021c8e46c3a7382..1b307de2d4896fd681bf83846d97beb652cceff1 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); +static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1, + u16 portstatus); static inline char *portspeed(struct usb_hub *hub, int portstatus) { @@ -1119,6 +1121,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) USB_PORT_FEAT_ENABLE); } + /* Make sure a warm-reset request is handled by port_event */ + if (type == HUB_RESUME && + hub_port_warm_reset_required(hub, port1, portstatus)) + set_bit(port1, hub->event_bits); + /* * Add debounce if USB3 link is in polling/link training state. 
* Link will automatically transition to Enabled state after diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 146fe3e6201f40628ddf2b103cc2b2f07be10a8c..66e0112d9983473d62362aa36a706c320bb5a366 100755 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -307,8 +307,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); dft = reg & DWC3_GFLADJ_30MHZ_MASK; - if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj, - "request value same as default, ignoring\n")) { + if (dft != dwc->fladj) { reg &= ~DWC3_GFLADJ_30MHZ_MASK; reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 356358302537c5a75fbceb3378b42a0dd5f4c85b..d29c5837c643edb163635b4062e106b6c03750e5 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -434,6 +434,7 @@ config USB_CONFIGFS_F_PTP config USB_CONFIGFS_F_ACC boolean "Accessory gadget" depends on USB_CONFIGFS + depends on HID=y select USB_F_ACC help USB gadget Accessory support diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c old mode 100644 new mode 100755 index b3925778d17466173577d77c7afcdce1a23c0d41..1a7ebc533fd0e0f833021a1bfdb97c937693ee8f --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -92,6 +92,8 @@ struct gadget_info { bool unbinding; char b_vendor_code; char qw_sign[OS_STRING_QW_SIGN_LEN]; + spinlock_t spinlock; + bool unbind; #ifdef CONFIG_USB_CONFIGFS_UEVENT bool connected; bool sw_connected; @@ -1290,6 +1292,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget, int ret; /* the gi->lock is hold by the caller */ + gi->unbind = 0; cdev->gadget = gadget; set_gadget_data(gadget, cdev); ret = composite_dev_prepare(composite, cdev); @@ -1476,19 +1479,118 @@ static void configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; struct gadget_info *gi; + unsigned long flags; /* the gi->lock is hold by the caller */ cdev = get_gadget_data(gadget); gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + gi->unbind = 1; + spin_unlock_irqrestore(&gi->spinlock, flags); kfree(otg_desc[0]); otg_desc[0] = NULL; purge_configs_funcs(gi); composite_dev_cleanup(cdev); usb_ep_autoconfig_reset(cdev->gadget); + spin_lock_irqsave(&gi->spinlock, flags); cdev->gadget = NULL; set_gadget_data(gadget, NULL); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +#ifndef CONFIG_USB_CONFIGFS_UEVENT +static int configfs_composite_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + int ret; + + cdev = get_gadget_data(gadget); + if (!cdev) + return 0; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return 0; + } + + ret = composite_setup(gadget, ctrl); + spin_unlock_irqrestore(&gi->spinlock, flags); + return ret; +} + +static void configfs_composite_disconnect(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + 
spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_disconnect(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} +#endif + +static void configfs_composite_suspend(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_suspend(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static void configfs_composite_resume(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_resume(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); } #ifdef CONFIG_USB_CONFIGFS_UEVENT @@ -1592,12 +1694,12 @@ static const struct usb_gadget_driver configfs_driver_template = { .reset = android_disconnect, .disconnect = android_disconnect, #else - .setup = composite_setup, - .reset = composite_disconnect, - .disconnect = composite_disconnect, + .setup = configfs_composite_setup, + .reset = configfs_composite_disconnect, + .disconnect = configfs_composite_disconnect, #endif - .suspend = composite_suspend, - .resume = composite_resume, + .suspend = configfs_composite_suspend, + .resume = configfs_composite_resume, .max_speed = USB_SPEED_SUPER, .driver = { @@ -1729,6 +1831,7 @@ static struct config_group *gadgets_make( gi->composite.resume = NULL; gi->composite.max_speed = USB_SPEED_SUPER; + spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); INIT_LIST_HEAD(&gi->string_list); INIT_LIST_HEAD(&gi->available_func); diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index ae0c12d0e200004b4751f2bb2fa41e02bde68c9f..a36041c79ba94cdd149eca6a2e65a8c45e8d1298 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -543,6 +543,7 @@ static int uvcg_control_class_allow_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } @@ -584,6 +585,7 @@ static int uvcg_control_class_drop_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } @@ -777,6 +779,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src, format_ptr->fmt = target_fmt; list_add_tail(&format_ptr->entry, &src_hdr->formats); ++src_hdr->num_fmt; + ++target_fmt->linked; out: mutex_unlock(&opts->lock); @@ -815,6 +818,8 @@ static int uvcg_streaming_header_drop_link(struct config_item *src, break; } + --target_fmt->linked; + out: mutex_unlock(&opts->lock); mutex_unlock(su_mutex); @@ -2425,6 +2430,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } @@ -2469,6 +2475,7 @@ static int uvcg_streaming_class_drop_link(struct config_item *src, unlock: mutex_unlock(&opts->lock); out: + config_item_put(header); mutex_unlock(su_mutex); return ret; } 
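[Editor's note, not part of the patch] The uvc_configfs.c hunks above add config_item_put() after each header lookup, and several other hunks in this series pair usb_get_intf()/usb_get_dev() with matching puts in their cleanup paths. The sketch below shows the general get/put reference-counting discipline in a self-contained userspace form; demo_obj, demo_get, demo_put and demo_link are made-up names for illustration, not kernel or configfs APIs.

    #include <stdlib.h>

    /* Made-up refcounted object with a kref-like get/put discipline. */
    struct demo_obj {
        int refcount;
        char payload[32];
    };

    static struct demo_obj *demo_get(struct demo_obj *obj)
    {
        if (obj)
            obj->refcount++;
        return obj;
    }

    static void demo_put(struct demo_obj *obj)
    {
        if (obj && --obj->refcount == 0)
            free(obj);                    /* the last put frees the object */
    }

    static int demo_link(struct demo_obj *header)
    {
        struct demo_obj *ref = demo_get(header);  /* reference taken by the lookup */
        int err = 0;

        /* ... use ref here; every exit path must drop it ... */

        demo_put(ref);                    /* analogous to the added config_item_put(header) */
        return err;
    }

    int main(void)
    {
        struct demo_obj *obj = calloc(1, sizeof(*obj));

        if (!obj)
            return 1;
        obj->refcount = 1;                /* creator holds the initial reference */
        demo_link(obj);                   /* the get/put inside stays balanced */
        demo_put(obj);                    /* drop the creator's reference: freed exactly once */
        return 0;
    }

The invariant being illustrated is that every successful get is balanced by exactly one put on every path, including error paths; the leaks fixed above are all cases where a lookup or probe took a reference that some path never dropped.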
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index 2ceb3ce406a45a8558260bb6235b3658a647b40b..91fd7821248804b3662f2f2ae9168e3304a243c4 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -129,6 +129,21 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, * Request handling */ +static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req) +{ + int ret; + + ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + if (ret < 0) { + printk(KERN_INFO "Failed to queue request (%d).\n", ret); + /* Isochronous endpoints can't be halted. */ + if (usb_endpoint_xfer_bulk(video->ep->desc)) + usb_ep_set_halt(video->ep); + } + + return ret; +} + /* * I somehow feel that synchronisation won't be easy to achieve here. We have * three events that control USB requests submission: @@ -193,14 +208,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req) video->encode(req, video, buf); - if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) { - printk(KERN_INFO "Failed to queue request (%d).\n", ret); - usb_ep_set_halt(ep); - spin_unlock_irqrestore(&video->queue.irqlock, flags); + ret = uvcg_video_ep_queue(video, req); + spin_unlock_irqrestore(&video->queue.irqlock, flags); + + if (ret < 0) { uvcg_queue_cancel(queue, 0); goto requeue; } - spin_unlock_irqrestore(&video->queue.irqlock, flags); return; @@ -324,15 +338,13 @@ int uvcg_video_pump(struct uvc_video *video) video->encode(req, video, buf); /* Queue the USB request */ - ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + ret = uvcg_video_ep_queue(video, req); + spin_unlock_irqrestore(&queue->irqlock, flags); + if (ret < 0) { - printk(KERN_INFO "Failed to queue request (%d)\n", ret); - usb_ep_set_halt(video->ep); - spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_queue_cancel(queue, 0); break; } - spin_unlock_irqrestore(&queue->irqlock, flags); } spin_lock_irqsave(&video->req_lock, flags); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 9705bcdbc577f82bca89aedb8b041edf665219e6..57dd3bad953977738879ee2c22161bb93a348b95 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -403,9 +403,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req) next_fifo_transaction(ep, req); if (req->last_transaction) { usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); - usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); } else { - usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); } } diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 53f9beab6bd6fb456d083657c679039a2b895077..2dd30d0c177a186937a9d42d55b7b855353b54bd 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -106,6 +106,17 @@ int usb_ep_enable(struct usb_ep *ep) if (ep->enabled) goto out; + /* UDC drivers can't handle endpoints with maxpacket size 0 */ + if (usb_endpoint_maxp(ep->desc) == 0) { + /* + * We should log an error message here, but we can't call + * dev_err() because there's no way to find the gadget + * given only ep. 
+ */ + ret = -EINVAL; + goto out; + } + ret = ep->ops->enable(ep, ep->desc); if (ret) goto out; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index ab89fa3b4118bc17a01d7680b29ae900128b2277..2f7023a289c9436ecda648152f84ef4cfb62c11a 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -50,6 +50,7 @@ #define DRIVER_VERSION "02 May 2005" #define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ +#define POWER_BUDGET_3 900 /* in mA */ static const char driver_name[] = "dummy_hcd"; static const char driver_desc[] = "USB Host+Gadget Emulator"; @@ -2433,7 +2434,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd) dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; INIT_LIST_HEAD(&dum_hcd->urbp_list); - dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET; + dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3; dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING; dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1; #ifdef CONFIG_USB_OTG diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 95df2b3bb6a1aaa9bcd8e2394869582b703fa849..76e991557116ab1012186c363e78f4e25add02d8 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -744,7 +744,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210, fotg210->ep0_req->length = 2; spin_unlock(&fotg210->lock); - fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL); + fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC); spin_lock(&fotg210->lock); } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 8991a40707926275948fd3a8f1108f263cbdb54c..bd98557caa28048e8372d795734db782805dc469 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -2570,7 +2570,7 @@ static int fsl_udc_remove(struct platform_device *pdev) dma_pool_destroy(udc_controller->td_pool); free_irq(udc_controller->irq, udc_controller); iounmap(dr_regs); - if (pdata->operating_mode == FSL_USB2_DR_DEVICE) + if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE)) release_mem_region(res->start, resource_size(res)); /* free udc --wait for the release() finished */ diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 6df1aded450396e910dd372f8cf1e5bb7c347f73..ac2aa04ca657363400095aa9002588406938e694 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1178,11 +1178,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) tmp = readl(USBD_RXDATA(udc->udp_baseaddr)); bl = bytes - n; - if (bl > 3) - bl = 3; + if (bl > 4) + bl = 4; for (i = 0; i < bl; i++) - data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF); + data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF); } break; diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 73f763c4f5f591a2c19053083dedf7b6aa252f31..144674913c78850647408d5da0224662f18734c7 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -122,7 +122,9 @@ static void setup_sch_info(struct usb_device *udev, } if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) { - if (esit_pkts <= sch_ep->esit) + if (sch_ep->esit == 1) + sch_ep->pkts = esit_pkts; + else if (esit_pkts <= sch_ep->esit) sch_ep->pkts = 1; else sch_ep->pkts = roundup_pow_of_two(esit_pkts) diff --git a/drivers/usb/host/xhci-ring.c 
b/drivers/usb/host/xhci-ring.c index a142339d9028f0bde0bd4330969e3491e5f5410b..fd2bcff0ec5791a599632376e318dda6d87e8ce6 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3190,10 +3190,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, if (usb_urb_dir_out(urb)) { len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, new_buff_len, enqd_len); - if (len != seg->bounce_len) + if (len != new_buff_len) xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", - len, seg->bounce_len); + len, new_buff_len); seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 52495fced86d4ca03d6adad3c99bad84995a6db7..919e0b85ba621dda996aabf55ba1ccd6ffbd7e05 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1028,7 +1028,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) command |= CMD_CSS; writel(command, &xhci->op_regs->command); if (xhci_handshake(&xhci->op_regs->status, - STS_SAVE, 0, 10 * 1000)) { + STS_SAVE, 0, 20 * 1000)) { xhci_warn(xhci, "WARN: xHC save state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -1089,6 +1089,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) hibernated = true; if (!hibernated) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. + */ + retval = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + if (retval) { + xhci_warn(xhci, "Controller not ready at resume %d\n", + retval); + spin_unlock_irq(&xhci->lock); + return retval; + } /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ @@ -4556,12 +4568,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, desc, state, timeout); - /* If we found we can't enable hub-initiated LPM, or + /* If we found we can't enable hub-initiated LPM, and * the U1 or U2 exit latency was too high to allow - * device-initiated LPM as well, just stop searching. + * device-initiated LPM as well, then we will disable LPM + * for this device, so stop searching any further. */ - if (alt_timeout == USB3_LPM_DISABLED || - alt_timeout == USB3_LPM_DEVICE_INITIATED) { + if (alt_timeout == USB3_LPM_DISABLED) { *timeout = alt_timeout; return -E2BIG; } @@ -4672,10 +4684,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); if (driver && driver->disable_hub_initiated_lpm) { - dev_dbg(&udev->dev, "Hub-initiated %s disabled " - "at request of driver %s\n", - state_name, driver->name); - return xhci_get_timeout_no_hub_lpm(udev, state); + dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", + state_name, driver->name); + timeout = xhci_get_timeout_no_hub_lpm(udev, + state); + if (timeout == USB3_LPM_DISABLED) + return timeout; } } diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index a4dbb0cd80da191b2d5e7e541c007f2fd1b23b76..0fecc002fa9f12e1a84ddd807d5c092c7ea67dab 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -724,6 +724,10 @@ static int mts_usb_probe(struct usb_interface *intf, } + if (ep_in_current != &ep_in_set[2]) { + MTS_WARNING("couldn't find two input bulk endpoints. 
Bailing out.\n"); + return -ENODEV; + } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 47b357760afc245a583e77b5a419b7a6a0b6927b..2de07a3653a0912dc1090a072bafc64e56e5af53 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -46,16 +46,6 @@ config USB_SEVSEG To compile this driver as a module, choose M here: the module will be called usbsevseg. -config USB_RIO500 - tristate "USB Diamond Rio500 support" - help - Say Y here if you want to connect a USB Rio500 mp3 player to your - computer's USB port. Please read - for more information. - - To compile this driver as a module, choose M here: the - module will be called rio500. - config USB_LEGOTOWER tristate "USB Lego Infrared Tower support" help diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 3d1992750da427eee344397e55b1f9ae7385aa7c..2b21872cd733d66ce71c3fc68f23fad2ebc5995c 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o obj-$(CONFIG_USB_LCD) += usblcd.o obj-$(CONFIG_USB_LD) += ldusb.o obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o -obj-$(CONFIG_USB_RIO500) += rio500.o obj-$(CONFIG_USB_TEST) += usbtest.o obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 564268fca07a0209ac093675f355ed59da965ad4..f0c071da68d196b7ac9cadafa327cd28d28a9f9b 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -80,6 +80,7 @@ struct adu_device { char serial_number[8]; int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char *read_buffer_primary; int read_buffer_length; @@ -121,7 +122,7 @@ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; - if (dev->udev == NULL) + if (dev->disconnected) return; /* shutdown transfer */ @@ -151,6 +152,7 @@ static void adu_delete(struct adu_device *dev) kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree(dev); } @@ -244,7 +246,7 @@ static int adu_open(struct inode *inode, struct file *file) } dev = usb_get_intfdata(interface); - if (!dev || !dev->udev) { + if (!dev) { retval = -ENODEV; goto exit_no_device; } @@ -327,7 +329,7 @@ static int adu_release(struct inode *inode, struct file *file) } adu_release_internal(dev); - if (dev->udev == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... 
and we're the last user */ adu_delete(dev); @@ -356,7 +358,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, return -ERESTARTSYS; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -525,7 +527,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, goto exit_nolock; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -679,7 +681,7 @@ static int adu_probe(struct usb_interface *interface, mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); @@ -789,19 +791,21 @@ static int adu_probe(struct usb_interface *interface, static void adu_disconnect(struct usb_interface *interface) { struct adu_device *dev; - int minor; dev = usb_get_intfdata(interface); - mutex_lock(&dev->mtx); /* not interruptible */ - dev->udev = NULL; /* poison */ - minor = dev->minor; usb_deregister_dev(interface, &adu_class); - mutex_unlock(&dev->mtx); + + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); + mutex_lock(&dev->mtx); /* not interruptible */ + dev->disconnected = 1; + mutex_unlock(&dev->mtx); + /* if the device is not opened, then we clean up right now */ if (!dev->open_count) adu_delete(dev); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index b8092bcf89a2928f338aafdb4d5e2d2d40f1ab10..32dc0d9f0519e76f8af44df6034ca1961992d85e 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -160,8 +160,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd) pdata->msgdata, 2, ACD_USB_TIMEOUT); mutex_unlock(&pdata->sysfslock); - - return retval; + + if (retval < 0) + return retval; + else + return 0; } static int appledisplay_bl_get_brightness(struct backlight_device *bd) @@ -179,7 +182,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) 0, pdata->msgdata, 2, ACD_USB_TIMEOUT); - brightness = pdata->msgdata[1]; + if (retval < 2) { + if (retval >= 0) + retval = -EMSGSIZE; + } else { + brightness = pdata->msgdata[1]; + } mutex_unlock(&pdata->sysfslock); if (retval < 0) @@ -321,6 +329,7 @@ static int appledisplay_probe(struct usb_interface *iface, if (pdata) { if (pdata->urb) { usb_kill_urb(pdata->urb); + cancel_delayed_work_sync(&pdata->work); if (pdata->urbdata) usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN, pdata->urbdata, pdata->urb->transfer_dma); diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index efecb87428b1c5d9f4ab12f55efba00ef3f8ca02..b694e200a98e1885307c0b8a80108e0e8f8bb55b 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -108,6 +108,7 @@ static void chaoskey_free(struct chaoskey *dev) usb_free_urb(dev->urb); kfree(dev->name); kfree(dev->buf); + usb_put_intf(dev->interface); kfree(dev); } } @@ -157,6 +158,8 @@ static int chaoskey_probe(struct usb_interface *interface, if (dev == NULL) goto out; + dev->interface = usb_get_intf(interface); + dev->buf = kmalloc(size, GFP_KERNEL); if (dev->buf == NULL) @@ -190,8 +193,6 @@ static int chaoskey_probe(struct usb_interface *interface, strcat(dev->name, udev->serial); } - 
dev->interface = interface; - dev->in_ep = in_ep; if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) @@ -411,13 +412,17 @@ static int _chaoskey_fill(struct chaoskey *dev) !dev->reading, (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); - if (result < 0) + if (result < 0) { + usb_kill_urb(dev->urb); goto out; + } - if (result == 0) + if (result == 0) { result = -ETIMEDOUT; - else + usb_kill_urb(dev->urb); + } else { result = dev->valid; + } out: /* Let the device go back to sleep eventually */ usb_autopm_put_interface(dev->interface); @@ -553,7 +558,21 @@ static int chaoskey_suspend(struct usb_interface *interface, static int chaoskey_resume(struct usb_interface *interface) { + struct chaoskey *dev; + struct usb_device *udev = interface_to_usbdev(interface); + usb_dbg(interface, "resume"); + dev = usb_get_intfdata(interface); + + /* + * We may have lost power. + * In that case the device that needs a long time + * for the first requests needs an extended timeout + * again + */ + if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) + dev->reads_started = false; + return 0; } #else diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 318e087f84421efabf7a831fbaea6e4894403b25..1b83946bfb18d7fbfae5661e9d9ccaa6d2a7b4ef 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -89,6 +89,7 @@ struct iowarrior { char chip_serial[9]; /* the serial number string of the chip connected */ int report_size; /* number of bytes in a report */ u16 product_id; + struct usb_anchor submitted; }; /*--------------*/ @@ -248,6 +249,7 @@ static inline void iowarrior_delete(struct iowarrior *dev) kfree(dev->int_in_buffer); usb_free_urb(dev->int_in_urb); kfree(dev->read_queue); + usb_put_intf(dev->interface); kfree(dev); } @@ -434,11 +436,13 @@ static ssize_t iowarrior_write(struct file *file, retval = -EFAULT; goto error; } + usb_anchor_urb(int_out_urb, &dev->submitted); retval = usb_submit_urb(int_out_urb, GFP_KERNEL); if (retval) { dev_dbg(&dev->interface->dev, "submit error %d for urb nr.%d\n", retval, atomic_read(&dev->write_busy)); + usb_unanchor_urb(int_out_urb); goto error; } /* submit was ok */ @@ -776,11 +780,13 @@ static int iowarrior_probe(struct usb_interface *interface, init_waitqueue_head(&dev->write_wait); dev->udev = udev; - dev->interface = interface; + dev->interface = usb_get_intf(interface); iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + init_usb_anchor(&dev->submitted); + /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; @@ -886,8 +892,6 @@ static void iowarrior_disconnect(struct usb_interface *interface) dev = usb_get_intfdata(interface); mutex_lock(&iowarrior_open_disc_lock); usb_set_intfdata(interface, NULL); - /* prevent device read, write and ioctl */ - dev->present = 0; minor = dev->minor; mutex_unlock(&iowarrior_open_disc_lock); @@ -898,8 +902,7 @@ static void iowarrior_disconnect(struct usb_interface *interface) mutex_lock(&dev->mutex); /* prevent device read, write and ioctl */ - - mutex_unlock(&dev->mutex); + dev->present = 0; if (dev->opened) { /* There is a process that holds a filedescriptor to the device , @@ -907,10 +910,13 @@ static void iowarrior_disconnect(struct usb_interface *interface) Deleting the device is postponed until close() was called. 
*/ usb_kill_urb(dev->int_in_urb); + usb_kill_anchored_urbs(&dev->submitted); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); + mutex_unlock(&dev->mutex); } else { /* no process is using the device, cleanup now */ + mutex_unlock(&dev->mutex); iowarrior_delete(dev); } diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index c5e3032a4d6b4c53b86ce3eff4d51f8f98b9c236..0f0cdb7b7bf8e1073308b1463bdebeb6790167eb 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -158,6 +158,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in struct ld_usb { struct mutex mutex; /* locks this structure */ struct usb_interface* intf; /* save off the usb interface pointer */ + unsigned long disconnected:1; int open_count; /* number of times this port has been opened */ @@ -197,12 +198,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; - if (dev->intf) - usb_kill_urb(dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } if (dev->interrupt_out_busy) - if (dev->intf) - usb_kill_urb(dev->interrupt_out_urb); + usb_kill_urb(dev->interrupt_out_urb); } /** @@ -210,8 +209,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) */ static void ld_usb_delete(struct ld_usb *dev) { - ld_usb_abort_transfers(dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); @@ -267,7 +264,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) { + if (dev->interrupt_in_running && !dev->buffer_overflow) { retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, @@ -387,16 +384,13 @@ static int ld_usb_release(struct inode *inode, struct file *file) goto exit; } - if (mutex_lock_interruptible(&dev->mutex)) { - retval = -ERESTARTSYS; - goto exit; - } + mutex_lock(&dev->mutex); if (dev->open_count != 1) { retval = -ENODEV; goto unlock_exit; } - if (dev->intf == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ mutex_unlock(&dev->mutex); /* unlock here as ld_usb_delete frees dev */ @@ -427,7 +421,7 @@ static unsigned int ld_usb_poll(struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->intf) + if (dev->disconnected) return POLLERR | POLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -466,7 +460,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -474,7 +468,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, /* wait for data */ spin_lock_irq(&dev->rbsl); - if (dev->ring_head == dev->ring_tail) { + while (dev->ring_head == dev->ring_tail) { dev->interrupt_in_done = 0; spin_unlock_irq(&dev->rbsl); if (file->f_flags & O_NONBLOCK) { @@ -484,12 +478,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done); if (retval < 0) goto unlock_exit; - } else { - spin_unlock_irq(&dev->rbsl); + + spin_lock_irq(&dev->rbsl); } + spin_unlock_irq(&dev->rbsl); /* actual_buffer contains actual_length + 
interrupt_in_buffer */ actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_tail*(sizeof(size_t)+dev->interrupt_in_endpoint_size)); + if (*actual_buffer > dev->interrupt_in_endpoint_size) { + retval = -EIO; + goto unlock_exit; + } bytes_to_read = min(count, *actual_buffer); if (bytes_to_read < *actual_buffer) dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n", @@ -500,11 +499,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = -EFAULT; goto unlock_exit; } - dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size; - retval = bytes_to_read; spin_lock_irq(&dev->rbsl); + dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size; + if (dev->buffer_overflow) { dev->buffer_overflow = 0; spin_unlock_irq(&dev->rbsl); @@ -546,7 +545,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -585,7 +584,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, 1 << 8, 0, dev->interrupt_out_buffer, bytes_to_write, - USB_CTRL_SET_TIMEOUT * HZ); + USB_CTRL_SET_TIMEOUT); if (retval < 0) dev_err(&dev->intf->dev, "Couldn't submit HID_REQ_SET_REPORT %d\n", @@ -705,7 +704,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id * dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n"); dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); - dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL); + dev->ring_buffer = kcalloc(ring_buffer_size, + sizeof(size_t) + dev->interrupt_in_endpoint_size, + GFP_KERNEL); if (!dev->ring_buffer) goto error; dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL); @@ -768,6 +769,9 @@ static void ld_usb_disconnect(struct usb_interface *intf) /* give back our minor */ usb_deregister_dev(intf, &ld_usb_class); + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->mutex); /* if the device is not opened, then we clean up right now */ @@ -775,7 +779,7 @@ static void ld_usb_disconnect(struct usb_interface *intf) mutex_unlock(&dev->mutex); ld_usb_delete(dev); } else { - dev->intf = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index c2e2b2ea32d80e7966bdf7becbb4f8728fa42e97..7cac3ee09b09da21e4e70c14a69000499719aaae 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -185,7 +185,6 @@ static const struct usb_device_id tower_table[] = { }; MODULE_DEVICE_TABLE (usb, tower_table); -static DEFINE_MUTEX(open_disc_mutex); #define LEGO_USB_TOWER_MINOR_BASE 160 @@ -197,6 +196,7 @@ struct lego_usb_tower { unsigned char minor; /* the starting minor number for this device */ int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char* read_buffer; size_t read_buffer_length; /* this much came in */ @@ -296,14 +296,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev, */ static inline void tower_delete (struct lego_usb_tower *dev) { - tower_abort_transfers (dev); - /* free data structures */ 
usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree (dev->read_buffer); kfree (dev->interrupt_in_buffer); kfree (dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree (dev); } @@ -338,18 +337,14 @@ static int tower_open (struct inode *inode, struct file *file) goto exit; } - mutex_lock(&open_disc_mutex); dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&open_disc_mutex); retval = -ENODEV; goto exit; } /* lock this device */ if (mutex_lock_interruptible(&dev->lock)) { - mutex_unlock(&open_disc_mutex); retval = -ERESTARTSYS; goto exit; } @@ -357,12 +352,9 @@ static int tower_open (struct inode *inode, struct file *file) /* allow opening only once */ if (dev->open_count) { - mutex_unlock(&open_disc_mutex); retval = -EBUSY; goto unlock_exit; } - dev->open_count = 1; - mutex_unlock(&open_disc_mutex); /* reset the tower */ result = usb_control_msg (dev->udev, @@ -402,13 +394,14 @@ static int tower_open (struct inode *inode, struct file *file) dev_err(&dev->udev->dev, "Couldn't submit interrupt_in_urb %d\n", retval); dev->interrupt_in_running = 0; - dev->open_count = 0; goto unlock_exit; } /* save device in the file's private structure */ file->private_data = dev; + dev->open_count = 1; + unlock_exit: mutex_unlock(&dev->lock); @@ -429,22 +422,19 @@ static int tower_release (struct inode *inode, struct file *file) if (dev == NULL) { retval = -ENODEV; - goto exit_nolock; - } - - mutex_lock(&open_disc_mutex); - if (mutex_lock_interruptible(&dev->lock)) { - retval = -ERESTARTSYS; goto exit; } + mutex_lock(&dev->lock); + if (dev->open_count != 1) { dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n", __func__); retval = -ENODEV; goto unlock_exit; } - if (dev->udev == NULL) { + + if (dev->disconnected) { /* the device was unplugged before the file was released */ /* unlock here as tower_delete frees dev */ @@ -462,10 +452,7 @@ static int tower_release (struct inode *inode, struct file *file) unlock_exit: mutex_unlock(&dev->lock); - exit: - mutex_unlock(&open_disc_mutex); -exit_nolock: return retval; } @@ -483,10 +470,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev) if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; mb(); - if (dev->udev) - usb_kill_urb (dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } - if (dev->interrupt_out_busy && dev->udev) + if (dev->interrupt_out_busy) usb_kill_urb(dev->interrupt_out_urb); } @@ -522,7 +508,7 @@ static unsigned int tower_poll (struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->udev) + if (dev->disconnected) return POLLERR | POLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -569,7 +555,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -655,7 +641,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -764,7 +750,7 @@ static void tower_interrupt_in_callback (struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && dev->udev) { + if (dev->interrupt_in_running) { retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC); if (retval) 
dev_err(&dev->udev->dev, @@ -830,8 +816,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device mutex_init(&dev->lock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); dev->open_count = 0; + dev->disconnected = 0; dev->read_buffer = NULL; dev->read_buffer_length = 0; @@ -911,8 +898,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device get_version_reply, sizeof(*get_version_reply), 1000); - if (result < 0) { - dev_err(idev, "LEGO USB Tower get version control request failed\n"); + if (result != sizeof(*get_version_reply)) { + if (result >= 0) + result = -EIO; + dev_err(idev, "get version request failed: %d\n", result); retval = result; goto error; } @@ -930,7 +919,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device if (retval) { /* something prevented us from registering this driver */ dev_err(idev, "Not able to get a minor for this device.\n"); - usb_set_intfdata (interface, NULL); goto error; } dev->minor = interface->minor; @@ -962,23 +950,24 @@ static void tower_disconnect (struct usb_interface *interface) int minor; dev = usb_get_intfdata (interface); - mutex_lock(&open_disc_mutex); - usb_set_intfdata (interface, NULL); minor = dev->minor; - /* give back our minor */ + /* give back our minor and prevent further open() */ usb_deregister_dev (interface, &tower_class); + /* stop I/O */ + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->lock); - mutex_unlock(&open_disc_mutex); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) { mutex_unlock(&dev->lock); tower_delete (dev); } else { - dev->udev = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c deleted file mode 100644 index 6e761fabffca064a965c487703e930e0b29055cf..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/rio500.c +++ /dev/null @@ -1,578 +0,0 @@ -/* -*- linux-c -*- */ - -/* - * Driver for USB Rio 500 - * - * Cesar Miquel (miquel@df.uba.ar) - * - * based on hp_scanner.c by David E. Nelson (dnelson@jump.net) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee). 
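The legousbtower changes above apply the same lifetime pattern: a dedicated "disconnected" flag replaces the old "udev pointer is NULL" convention, the interrupt URBs are poisoned before the device mutex is taken in disconnect(), and probe() now holds a reference on the usb_device (usb_get_dev) that is dropped only in the final free (usb_put_dev), so a file handle that races with unplug cannot follow a stale pointer. A compressed sketch of that disconnect path; "struct my_dev", "my_disconnect" and their fields are assumed stand-ins for the driver-private structure, while the USB core helpers are real:

	struct my_dev {
		struct usb_device *udev;
		struct urb *urb;
		struct mutex lock;
		int open_count;
		unsigned long disconnected:1;
	};

	static void my_disconnect(struct usb_interface *intf)
	{
		struct my_dev *dev = usb_get_intfdata(intf);

		usb_poison_urb(dev->urb);	/* fail any further submissions */

		mutex_lock(&dev->lock);
		if (!dev->open_count) {
			/* no open file handles: free immediately */
			mutex_unlock(&dev->lock);
			usb_put_dev(dev->udev);	/* drop probe's reference */
			kfree(dev);
		} else {
			/* defer the final free to release() */
			dev->disconnected = 1;
			mutex_unlock(&dev->lock);
		}
	}

The matching release() does the mirror check under dev->lock: if disconnected was set while the file was open, it performs the final cleanup itself instead of touching the hardware.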
- * - * Changelog: - * 30/05/2003 replaced lock/unlock kernel with up/down - * Daniele Bellucci bellucda@tiscali.it - * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "rio500_usb.h" - -/* - * Version Information - */ -#define DRIVER_VERSION "v1.1" -#define DRIVER_AUTHOR "Cesar Miquel " -#define DRIVER_DESC "USB Rio 500 driver" - -#define RIO_MINOR 64 - -/* stall/wait timeout for rio */ -#define NAK_TIMEOUT (HZ) - -#define IBUF_SIZE 0x1000 - -/* Size of the rio buffer */ -#define OBUF_SIZE 0x10000 - -struct rio_usb_data { - struct usb_device *rio_dev; /* init: probe_rio */ - unsigned int ifnum; /* Interface number of the USB device */ - int isopen; /* nz if open */ - int present; /* Device is present on the bus */ - char *obuf, *ibuf; /* transfer buffers */ - char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ - wait_queue_head_t wait_q; /* for timeouts */ - struct mutex lock; /* general race avoidance */ -}; - -static DEFINE_MUTEX(rio500_mutex); -static struct rio_usb_data rio_instance; - -static int open_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - /* against disconnect() */ - mutex_lock(&rio500_mutex); - mutex_lock(&(rio->lock)); - - if (rio->isopen || !rio->present) { - mutex_unlock(&(rio->lock)); - mutex_unlock(&rio500_mutex); - return -EBUSY; - } - rio->isopen = 1; - - init_waitqueue_head(&rio->wait_q); - - mutex_unlock(&(rio->lock)); - - dev_info(&rio->rio_dev->dev, "Rio opened.\n"); - mutex_unlock(&rio500_mutex); - - return 0; -} - -static int close_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - /* against disconnect() */ - mutex_lock(&rio500_mutex); - mutex_lock(&(rio->lock)); - - rio->isopen = 0; - if (!rio->present) { - /* cleanup has been delayed */ - kfree(rio->ibuf); - kfree(rio->obuf); - rio->ibuf = NULL; - rio->obuf = NULL; - } else { - dev_info(&rio->rio_dev->dev, "Rio closed.\n"); - } - mutex_unlock(&(rio->lock)); - mutex_unlock(&rio500_mutex); - return 0; -} - -static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct RioCommand rio_cmd; - struct rio_usb_data *rio = &rio_instance; - void __user *data; - unsigned char *buffer; - int result, requesttype; - int retries; - int retval=0; - - mutex_lock(&(rio->lock)); - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - retval = -ENODEV; - goto err_out; - } - - switch (cmd) { - case RIO_RECV_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - retval = -EFAULT; - free_page((unsigned long) buffer); - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_IN | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_rcvctrlpipe(rio-> rio_dev, 0), - 
rio_cmd.request, - requesttype, - rio_cmd.value, - rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d (data=%02x)\n", - result, buffer[0]); - if (copy_to_user(rio_cmd.buffer, buffer, - rio_cmd.length)) { - free_page((unsigned long) buffer); - retval = -EFAULT; - goto err_out; - } - retries = 0; - } - - /* rio_cmd.buffer contains a raw stream of single byte - data which has been returned from rio. Data is - interpreted at application level. For data that - will be cast to data types longer than 1 byte, data - will be little_endian and will potentially need to - be swapped at the app level */ - - } - free_page((unsigned long) buffer); - break; - - case RIO_SEND_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - free_page((unsigned long)buffer); - retval = -EFAULT; - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_OUT | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_sndctrlpipe(rio-> rio_dev, 0), - rio_cmd.request, - requesttype, - rio_cmd.value, - rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d\n", result); - retries = 0; - - } - - } - free_page((unsigned long) buffer); - break; - - default: - retval = -ENOTTY; - break; - } - - -err_out: - mutex_unlock(&(rio->lock)); - return retval; -} - -static ssize_t -write_rio(struct file *file, const char __user *buffer, - size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - - unsigned long copy_size; - unsigned long bytes_written = 0; - unsigned int partial; - - int result = 0; - int maxretry; - int errn = 0; - int intr; - - intr = mutex_lock_interruptible(&(rio->lock)); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - - - - do { - unsigned long thistime; - char *obuf = rio->obuf; - - thistime = copy_size = - (count >= OBUF_SIZE) ? OBUF_SIZE : count; - if (copy_from_user(rio->obuf, buffer, copy_size)) { - errn = -EFAULT; - goto error; - } - maxretry = 5; - while (thistime) { - if (!rio->rio_dev) { - errn = -ENODEV; - goto error; - } - if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); - return bytes_written ? 
bytes_written : -EINTR; - } - - result = usb_bulk_msg(rio->rio_dev, - usb_sndbulkpipe(rio->rio_dev, 2), - obuf, thistime, &partial, 5000); - - dev_dbg(&rio->rio_dev->dev, - "write stats: result:%d thistime:%lu partial:%u\n", - result, thistime, partial); - - if (result == -ETIMEDOUT) { /* NAK - so hold for a while */ - if (!maxretry--) { - errn = -ETIME; - goto error; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (!result && partial) { - obuf += partial; - thistime -= partial; - } else - break; - } - if (result) { - dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n", - result); - errn = -EIO; - goto error; - } - bytes_written += copy_size; - count -= copy_size; - buffer += copy_size; - } while (count > 0); - - mutex_unlock(&(rio->lock)); - - return bytes_written ? bytes_written : -EIO; - -error: - mutex_unlock(&(rio->lock)); - return errn; -} - -static ssize_t -read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - ssize_t read_count; - unsigned int partial; - int this_read; - int result; - int maxretry = 10; - char *ibuf; - int intr; - - intr = mutex_lock_interruptible(&(rio->lock)); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - - ibuf = rio->ibuf; - - read_count = 0; - - - while (count > 0) { - if (signal_pending(current)) { - mutex_unlock(&(rio->lock)); - return read_count ? read_count : -EINTR; - } - if (!rio->rio_dev) { - mutex_unlock(&(rio->lock)); - return -ENODEV; - } - this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; - - result = usb_bulk_msg(rio->rio_dev, - usb_rcvbulkpipe(rio->rio_dev, 1), - ibuf, this_read, &partial, - 8000); - - dev_dbg(&rio->rio_dev->dev, - "read stats: result:%d this_read:%u partial:%u\n", - result, this_read, partial); - - if (partial) { - count = this_read = partial; - } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? 
*/ - if (!maxretry--) { - mutex_unlock(&(rio->lock)); - dev_err(&rio->rio_dev->dev, - "read_rio: maxretry timeout\n"); - return -ETIME; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (result != -EREMOTEIO) { - mutex_unlock(&(rio->lock)); - dev_err(&rio->rio_dev->dev, - "Read Whoops - result:%u partial:%u this_read:%u\n", - result, partial, this_read); - return -EIO; - } else { - mutex_unlock(&(rio->lock)); - return (0); - } - - if (this_read) { - if (copy_to_user(buffer, ibuf, this_read)) { - mutex_unlock(&(rio->lock)); - return -EFAULT; - } - count -= this_read; - read_count += this_read; - buffer += this_read; - } - } - mutex_unlock(&(rio->lock)); - return read_count; -} - -static const struct file_operations usb_rio_fops = { - .owner = THIS_MODULE, - .read = read_rio, - .write = write_rio, - .unlocked_ioctl = ioctl_rio, - .open = open_rio, - .release = close_rio, - .llseek = noop_llseek, -}; - -static struct usb_class_driver usb_rio_class = { - .name = "rio500%d", - .fops = &usb_rio_fops, - .minor_base = RIO_MINOR, -}; - -static int probe_rio(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct rio_usb_data *rio = &rio_instance; - int retval = 0; - - mutex_lock(&rio500_mutex); - if (rio->present) { - dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum); - retval = -EBUSY; - goto bail_out; - } else { - dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); - } - - retval = usb_register_dev(intf, &usb_rio_class); - if (retval) { - dev_err(&dev->dev, - "Not able to get a minor for this device.\n"); - retval = -ENOMEM; - goto bail_out; - } - - rio->rio_dev = dev; - - if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the output buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - retval = -ENOMEM; - goto bail_out; - } - dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf); - - if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the input buffer\n"); - usb_deregister_dev(intf, &usb_rio_class); - kfree(rio->obuf); - retval = -ENOMEM; - goto bail_out; - } - dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf); - - mutex_init(&(rio->lock)); - - usb_set_intfdata (intf, rio); - rio->present = 1; -bail_out: - mutex_unlock(&rio500_mutex); - - return retval; -} - -static void disconnect_rio(struct usb_interface *intf) -{ - struct rio_usb_data *rio = usb_get_intfdata (intf); - - usb_set_intfdata (intf, NULL); - mutex_lock(&rio500_mutex); - if (rio) { - usb_deregister_dev(intf, &usb_rio_class); - - mutex_lock(&(rio->lock)); - if (rio->isopen) { - rio->isopen = 0; - /* better let it finish - the release will do whats needed */ - rio->rio_dev = NULL; - mutex_unlock(&(rio->lock)); - mutex_unlock(&rio500_mutex); - return; - } - kfree(rio->ibuf); - kfree(rio->obuf); - - dev_info(&intf->dev, "USB Rio disconnected.\n"); - - rio->present = 0; - mutex_unlock(&(rio->lock)); - } - mutex_unlock(&rio500_mutex); -} - -static const struct usb_device_id rio_table[] = { - { USB_DEVICE(0x0841, 1) }, /* Rio 500 */ - { } /* Terminating entry */ -}; - -MODULE_DEVICE_TABLE (usb, rio_table); - -static struct usb_driver rio_driver = { - .name = "rio500", - .probe = probe_rio, - .disconnect = disconnect_rio, - .id_table = rio_table, -}; - -module_usb_driver(rio_driver); - -MODULE_AUTHOR( 
DRIVER_AUTHOR ); -MODULE_DESCRIPTION( DRIVER_DESC ); -MODULE_LICENSE("GPL"); - diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h deleted file mode 100644 index 359abc98e7061eeea9f6380ed337a1feb7da296d..0000000000000000000000000000000000000000 --- a/drivers/usb/misc/rio500_usb.h +++ /dev/null @@ -1,37 +0,0 @@ -/* ---------------------------------------------------------------------- - - Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - - ---------------------------------------------------------------------- */ - - - -#define RIO_SEND_COMMAND 0x1 -#define RIO_RECV_COMMAND 0x2 - -#define RIO_DIR_OUT 0x0 -#define RIO_DIR_IN 0x1 - -struct RioCommand { - short length; - int request; - int requesttype; - int value; - int index; - void __user *buffer; - int timeout; -}; diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 9f48419abc46b97ebdb562fa4a0b606e7223d67e..35a736eaf864fdda8741a4b65a4a565c9437b7ac 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,8 @@ struct usb_lcd { using up all RAM */ struct usb_anchor submitted; /* URBs to wait for before suspend */ + struct rw_semaphore io_rwsem; + unsigned long disconnected:1; }; #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref) @@ -141,6 +144,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, dev = file->private_data; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto out_up_io; + } + /* do a blocking bulk read to get data from the device */ retval = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, @@ -157,6 +167,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, retval = bytes_read; } +out_up_io: + up_read(&dev->io_rwsem); + return retval; } @@ -236,11 +249,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, if (r < 0) return -EINTR; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto err_up_io; + } + /* create a urb, and a buffer for it, and copy the data to the urb */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; - goto err_no_buf; + goto err_up_io; } buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, @@ -277,6 +297,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, the USB core will eventually free it entirely */ usb_free_urb(urb); + up_read(&dev->io_rwsem); exit: return count; error_unanchor: @@ -284,7 +305,8 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, error: usb_free_coherent(dev->udev, count, buf, urb->transfer_dma); usb_free_urb(urb); -err_no_buf: +err_up_io: + up_read(&dev->io_rwsem); up(&dev->limit_sem); return retval; } @@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface 
*interface, goto error; kref_init(&dev->kref); sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES); + init_rwsem(&dev->io_rwsem); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); @@ -432,6 +455,12 @@ static void lcd_disconnect(struct usb_interface *interface) /* give back our minor */ usb_deregister_dev(interface, &lcd_class); + down_write(&dev->io_rwsem); + dev->disconnected = 1; + up_write(&dev->io_rwsem); + + usb_kill_anchored_urbs(&dev->submitted); + /* decrement our usage count */ kref_put(&dev->kref, lcd_delete); diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 9744e5f996c1ae9c699626295e3f03c87574bb89..2350502f90540195c8e81a5f89ef9bbe8dbaea48 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -64,6 +64,7 @@ struct usb_yurex { struct kref kref; struct mutex io_mutex; + unsigned long disconnected:1; struct fasync_struct *async_queue; wait_queue_head_t waitq; @@ -111,6 +112,7 @@ static void yurex_delete(struct kref *kref) dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev); } @@ -136,6 +138,7 @@ static void yurex_interrupt(struct urb *urb) switch (status) { case 0: /*success*/ break; + /* The device is terminated or messed up, give up */ case -EOVERFLOW: dev_err(&dev->interface->dev, "%s - overflow with length %d, actual length is %d\n", @@ -144,12 +147,13 @@ static void yurex_interrupt(struct urb *urb) case -ENOENT: case -ESHUTDOWN: case -EILSEQ: - /* The device is terminated, clean up */ + case -EPROTO: + case -ETIME: return; default: dev_err(&dev->interface->dev, "%s - unknown status received: %d\n", __func__, status); - goto exit; + return; } /* handle received message */ @@ -181,7 +185,6 @@ static void yurex_interrupt(struct urb *urb) break; } -exit: retval = usb_submit_urb(dev->urb, GFP_ATOMIC); if (retval) { dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n", @@ -208,7 +211,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ iface_desc = interface->cur_altsetting; @@ -325,8 +328,9 @@ static void yurex_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ usb_poison_urb(dev->urb); + usb_poison_urb(dev->cntl_urb); mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); /* wakeup waiters */ @@ -414,7 +418,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, dev = file->private_data; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); return -ENODEV; } @@ -449,7 +453,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, goto error; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h index 8c5fc12ad7781f7a83ef9ec142107fd542b1e0f8..b8620aa6b72e603abeeb85216438d6b0077abb30 100644 --- a/drivers/usb/renesas_usbhs/common.h +++ b/drivers/usb/renesas_usbhs/common.h @@ -213,6 +213,7 @@ struct usbhs_priv; /* 
DCPCTR */ #define BSTS (1 << 15) /* Buffer Status */ #define SUREQ (1 << 14) /* Sending SETUP Token */ +#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */ #define CSSTS (1 << 12) /* CSSTS Status */ #define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */ #define SQCLR (1 << 8) /* Toggle Bit Clear */ diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 696560529e6ab0296518955d585132fe2feacc5f..f6a1ae8abb21435e317e3accbc19cd2b7c788f96 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -98,7 +98,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt) list_del_init(&pkt->node); } -static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) { if (list_empty(&pipe->list)) return NULL; diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index 8b98507d7abc0ea43e9c4931e68971689fcae20a..c1fb39252b23ec1dfcfd434d9dd1ec36a1fc5d31 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -106,5 +106,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, void *buf, int len, int zero, int sequence); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt); void usbhs_pkt_start(struct usbhs_pipe *pipe); +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe); #endif /* RENESAS_USB_FIFO_H */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 5984fb134cf434a661aea744006530fbe6027fb2..6898ca1ef98c097a8e8f2b98fbf080797bb041bd 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -729,8 +729,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); unsigned long flags; - - usbhsg_pipe_disable(uep); + int ret = 0; dev_dbg(dev, "set halt %d (pipe %d)\n", halt, usbhs_pipe_number(pipe)); @@ -738,6 +737,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) /******************** spin lock ********************/ usbhs_lock(priv, flags); + /* + * According to usb_ep_set_halt()'s description, this function should + * return -EAGAIN if the IN endpoint has any queue or data. Note + * that the usbhs_pipe_is_dir_in() returns false if the pipe is an + * IN endpoint in the gadget mode. 
+ */ + if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) || + usbhs_pipe_contains_transmittable_data(pipe))) { + ret = -EAGAIN; + goto out; + } + if (halt) usbhs_pipe_stall(pipe); else @@ -748,10 +759,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) else usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); +out: usbhs_unlock(priv, flags); /******************** spin unlock ******************/ - return 0; + return ret; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 9396a8c14af80104e8841d98ca30d27ffb1fd88d..8db4ca7d5d45163624c82f9798f803e37efd1eef 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -286,6 +286,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe) return -EBUSY; } +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe) +{ + u16 val; + + /* Do not support for DCP pipe */ + if (usbhs_pipe_is_dcp(pipe)) + return false; + + val = usbhsp_pipectrl_get(pipe); + if (val & INBUFM) + return true; + + return false; +} + /* * PID ctrl */ diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index 95185fdb29b1532cfe7f6e0edecd1a25d0c54742..e4144704ee027b606eb6c2c10cf52732a793f56c 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h @@ -90,6 +90,7 @@ void usbhs_pipe_init(struct usbhs_priv *priv, int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe); void usbhs_pipe_clear(struct usbhs_pipe *pipe); int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe); +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe); void usbhs_pipe_enable(struct usbhs_pipe *pipe); void usbhs_pipe_disable(struct usbhs_pipe *pipe); void usbhs_pipe_stall(struct usbhs_pipe *pipe); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 109f5da2486fb40305957ca386fda802c2e93d0a..71a951f9dbe8989bb05aa8793df70b84280829c6 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. 
*/ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index bbeeb2bd55a83cebf4cfec9b4b8f2876edde1ca0..2c915be1db4ce3599580823f9b96d610fb202182 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -773,7 +773,7 @@ static void cypress_send(struct usb_serial_port *port) usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev, usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress), - port->interrupt_out_buffer, port->interrupt_out_size, + port->interrupt_out_buffer, actual_size, cypress_write_int_callback, port, priv->write_urb_interval); result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC); if (result) { diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 63ff1a4f2e41411a832daa2d398557790c9f865e..f94d11615b45b7ff79246ea7b35f6f819d560ef7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1025,6 +1025,9 @@ static const struct usb_device_id id_table_combined[] = { /* EZPrototypes devices */ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) }, { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) }, + /* Sienna devices */ + { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, + { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index ed6b36674c1595b60f805257d02eae80c7ab1c56..2e8161f79b49c2d151350f36ffa7a19dd8928de2 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -38,6 +38,9 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* Sienna Serial Interface by Secyourit GmbH */ +#define FTDI_SIENNA_PID 0x8348 + /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ #define CYBER_CORTEX_AV_PID 0x8698 @@ -687,6 +690,12 @@ #define BANDB_TTL3USB9M_PID 0xAC50 #define BANDB_ZZ_PROG1_USB_PID 0xBA02 +/* + * Echelon USB Serial Interface + */ +#define ECHELON_VID 0x0920 +#define ECHELON_U20_PID 0x7500 + /* * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI */ diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 1f9414bdd64989c65f3c9eff22190bbb8d73b9fe..185ef1d8c6cdef360fde6f11e9b3d89da21b5b6f 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1250,8 +1250,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint, ep_desc = find_ep(serial, endpoint); if (!ep_desc) { - /* leak the urb, something's wrong and the callers don't care */ - return urb; + usb_free_urb(urb); + return NULL; } if (usb_endpoint_xfer_int(ep_desc)) { ep_type_name = "INT"; diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index ea20322e1416be6f6bda10a612ae01c74d201d3e..14b45f3e6388a99f5627a7a2bb3a4514858de8b3 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1941,10 +1941,6 @@ static int mos7720_startup(struct usb_serial *serial) } } - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); - #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT if 
(product == MOSCHIP_DEVICE_ID_7715) { ret_val = mos7715_parport_init(serial); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 03d63bad6be48e6116d8e6bdd9d9db8d356d51dc..0c92252c93163b165b755c733199af9203bc4abd 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -131,11 +131,15 @@ /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 * ATEN UC2322 device using Moschip MCS7820 + * MOXA UPort 2210 device using Moschip MCS7820 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 #define ATENINTL_DEVICE_ID_UC2322 0x7820 +#define USB_VENDOR_ID_MOXA 0x110a +#define MOXA_DEVICE_ID_2210 0x2210 + /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 @@ -206,6 +210,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); @@ -2089,6 +2094,7 @@ static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); + u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor); u8 *buf; int device_type; @@ -2098,6 +2104,11 @@ static int mos7840_probe(struct usb_serial *serial, goto out; } + if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) { + device_type = MOSCHIP_DEVICE_ID_7820; + goto out; + } + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -2350,11 +2361,6 @@ static int mos7840_port_probe(struct usb_serial_port *port) goto error; } else dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); - - /* setting configuration feature to one */ - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - 0x03, 0x00, 0x01, 0x00, NULL, 0x00, - MOS_WDR_TIMEOUT); } return 0; error: diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 1bceb11f3782e043876b58a5dc987dc4eb9bceb9..084332a5855e0ff08ea9cf5f3f28d2921c35a94a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -200,6 +200,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define DELL_PRODUCT_5821E 0x81d7 +#define DELL_PRODUCT_5821E_ESIM 0x81e0 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -421,6 +422,7 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 +#define CINTERION_PRODUCT_CLS8 0x00b0 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -1042,6 +1044,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1149,6 +1153,14 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) 
| RSVD(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */ + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1842,6 +1854,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), + .driver_info = RSVD(0) | RSVD(4) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -1976,6 +1990,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, + { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 836cb93ba49e720f905d0d970115fbc787b3bf08..a7e41723c34cfa1566c5439f2e846ff57d89373e 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -778,7 +778,6 @@ static void ti_close(struct usb_serial_port *port) struct ti_port *tport; int port_number; int status; - int do_unlock; unsigned long flags; tdev = usb_get_serial_data(port->serial); @@ -802,16 +801,13 @@ static void ti_close(struct usb_serial_port *port) "%s - cannot send close port command, %d\n" , __func__, status); - /* if mutex_lock is interrupted, continue anyway */ - do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock); + mutex_lock(&tdev->td_open_close_lock); --tport->tp_tdev->td_open_port_count; - if (tport->tp_tdev->td_open_port_count <= 0) { + if (tport->tp_tdev->td_open_port_count == 0) { /* last port is closed, shut down interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); - tport->tp_tdev->td_open_port_count = 0; } - if (do_unlock) - mutex_unlock(&tdev->td_open_close_lock); + mutex_unlock(&tdev->td_open_close_lock); } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 4a037b4a79cf3168cb45d60d1b6488ac21681570..a894c0fdc04b7c49138da5f46a6e4ace56c893ab 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -315,10 +315,7 @@ static void 
serial_cleanup(struct tty_struct *tty) serial = port->serial; owner = serial->type->driver.owner; - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) - usb_autopm_put_interface(serial->interface); - mutex_unlock(&serial->disc_mutex); + usb_autopm_put_interface(serial->interface); usb_serial_put(serial); module_put(owner); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index d3ea90bef84d98a67f110f6040ca70ec12b7a5fb..345211f1a4915a180e7a5386e583b522b0c2a264 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -604,6 +604,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, command_port = port->serial->port[COMMAND_PORT]; command_info = usb_get_serial_port_data(command_port); + + if (command_port->bulk_out_size < datasize + 1) + return -EIO; + mutex_lock(&command_info->mutex); command_info->command_finished = false; @@ -677,6 +681,7 @@ static void firm_setup_port(struct tty_struct *tty) struct device *dev = &port->dev; struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios.c_cflag; + speed_t baud; port_settings.port = port->port_number + 1; @@ -737,11 +742,13 @@ static void firm_setup_port(struct tty_struct *tty) dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff); /* get the baud rate wanted */ - port_settings.baud = tty_get_baud_rate(tty); - dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud); + baud = tty_get_baud_rate(tty); + port_settings.baud = cpu_to_le32(baud); + dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud); /* fixme: should set validated settings */ - tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud); + tty_encode_baud_rate(tty, baud, baud); + /* handle any settings that aren't specified in the tty structure */ port_settings.lloop = 0; diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h index 38065df4d2d8217f10f24cf552127597411768b9..30169c859a74e095f8869c359b23f51e5cf59719 100644 --- a/drivers/usb/serial/whiteheat.h +++ b/drivers/usb/serial/whiteheat.h @@ -91,7 +91,7 @@ struct whiteheat_simple { struct whiteheat_port_settings { __u8 port; /* port number (1 to N) */ - __u32 baud; /* any value 7 - 460800, firmware calculates + __le32 baud; /* any value 7 - 460800, firmware calculates best fit; arrives little endian */ __u8 bits; /* 5, 6, 7, or 8 */ __u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index afb4b0bf47b37f9d922ac31c5b14c8fc4af3207d..fd5398efce41289108f0847f2e0e014843cf59e2 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -81,7 +81,6 @@ static const char* host_info(struct Scsi_Host *host) static int slave_alloc (struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); - int maxp; /* * Set the INQUIRY transfer length to 36. We don't use any of @@ -90,15 +89,6 @@ static int slave_alloc (struct scsi_device *sdev) */ sdev->inquiry_len = 36; - /* - * USB has unusual scatter-gather requirements: the length of each - * scatterlist element except the last must be divisible by the - * Bulk maxpacket value. Fortunately this value is always a - * power of 2. Inform the block layer about this requirement. - */ - maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0); - blk_queue_virt_boundary(sdev->request_queue, maxp - 1); - /* * Some host controllers may have alignment requirements. 
* We'll play it safe by requiring 512-byte alignment always. diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 97621e5bdad7bf442e6ec694e70e8c5054996ce8..597bc550034f911e5ae1f9ae1080834d04c3cbd0 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -796,29 +796,9 @@ static int uas_slave_alloc(struct scsi_device *sdev) { struct uas_dev_info *devinfo = (struct uas_dev_info *)sdev->host->hostdata; - int maxp; sdev->hostdata = devinfo; - /* - * We have two requirements here. We must satisfy the requirements - * of the physical HC and the demands of the protocol, as we - * definitely want no additional memory allocation in this path - * ruling out using bounce buffers. - * - * For a transmission on USB to continue we must never send - * a package that is smaller than maxpacket. Hence the length of each - * scatterlist element except the last must be divisible by the - * Bulk maxpacket value. - * If the HC does not ensure that through SG, - * the upper layer must do that. We must assume nothing - * about the capabilities off the HC, so we use the most - * pessimistic requirement. - */ - - maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0); - blk_queue_virt_boundary(sdev->request_queue, maxp - 1); - /* * The protocol has no requirements on alignment in the strict sense. * Controllers may or may not have alignment restrictions. diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index 5133a0792eb0418391f74e4f56486731dc7e62fc..f243744866238a21fb4e1a304726c366ffabcae0 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -63,6 +63,7 @@ struct usb_skel { spinlock_t err_lock; /* lock for errors */ struct kref kref; struct mutex io_mutex; /* synchronize I/O with disconnect */ + unsigned long disconnected:1; wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */ }; #define to_skel_dev(d) container_of(d, struct usb_skel, kref) @@ -75,6 +76,7 @@ static void skel_delete(struct kref *kref) struct usb_skel *dev = to_skel_dev(kref); usb_free_urb(dev->bulk_in_urb); + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev->bulk_in_buffer); kfree(dev); @@ -126,10 +128,7 @@ static int skel_release(struct inode *inode, struct file *file) return -ENODEV; /* allow the device to be autosuspended */ - mutex_lock(&dev->io_mutex); - if (dev->interface) - usb_autopm_put_interface(dev->interface); - mutex_unlock(&dev->io_mutex); + usb_autopm_put_interface(dev->interface); /* decrement the count on our device */ kref_put(&dev->kref, skel_delete); @@ -241,7 +240,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, if (rv < 0) return rv; - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ rv = -ENODEV; goto exit; } @@ -422,7 +421,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, /* this lock makes sure we don't submit URBs to gone devices */ mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; @@ -509,7 +508,7 @@ static int skel_probe(struct usb_interface *interface, init_waitqueue_head(&dev->bulk_in_wait); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ @@ -582,7 +581,7 @@ static void 
skel_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); usb_kill_anchored_urbs(&dev->submitted); diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 777a4058c407beee5abdacc72bd7c8c7644c3c00..d47176f9c310317e349fb245ca284ae03d88fad8 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -353,14 +353,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) epd = &ep->desc; - /* validate transfer_buffer_length */ - if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { - dev_err(&sdev->udev->dev, - "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", - pdu->u.cmd_submit.transfer_buffer_length); - return -1; - } - if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); @@ -487,8 +479,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0 && - pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { + if (pdu->u.cmd_submit.transfer_buffer_length > 0) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 9936a2f199b14de95d6573708923d6cf7f1eb6ee..8bda6455dfcb2c95342c820bbed5d735474e36ea 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -318,6 +318,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: break; } + break; default: usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n", wValue); @@ -465,13 +466,14 @@ static void vhci_tx_urb(struct urb *urb) { struct vhci_device *vdev = get_vdev(urb->dev); struct vhci_priv *priv; - struct vhci_hcd *vhci = vdev_to_vhci(vdev); + struct vhci_hcd *vhci; unsigned long flags; if (!vdev) { pr_err("could not get virtual device"); return; } + vhci = vdev_to_vhci(vdev); priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC); if (!priv) { @@ -512,8 +514,10 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, } vdev = &vhci->vdev[portnum-1]; - /* patch to usb_sg_init() is in 2.5.60 */ - BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); + if (!urb->transfer_buffer && urb->transfer_buffer_length) { + dev_dbg(dev, "Null URB transfer buffer\n"); + return -EINVAL; + } spin_lock_irqsave(&vhci->lock, flags); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index f9a75df2d22d1181e84ea61bd20b0a06eee10887..da3f0ed18c769344eee4107b3d7e774e448fe184 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -356,11 +356,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); /* - * Try to reset the device. The success of this is dependent on - * being able to lock the device, which is not always possible. + * Try to get the locks ourselves to prevent a deadlock. The + * success of this is dependent on being able to lock the device, + * which is not always possible. + * We can not use the "try" reset interface here, which will + * overwrite the previously restored configuration information. 
*/ - if (vdev->reset_works && !pci_try_reset_function(pdev)) - vdev->needs_reset = false; + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) { + if (device_trylock(&pdev->dev)) { + if (!__pci_reset_function_locked(pdev)) + vdev->needs_reset = false; + device_unlock(&pdev->dev); + } + pci_cfg_access_unlock(pdev); + } pci_restore_state(pdev); out: @@ -417,10 +426,14 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) { if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { u8 pin; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || + vdev->nointx || vdev->pdev->is_virtfn) + return 0; + pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin) - return 1; + return pin ? 1 : 0; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { u8 pos; u16 flags; diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 7b8a957b008d23cc9addec2380e36fd967bae5b2..84905d074c4ff4994ea2e1f4963fb8c0de6818e1 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1182,8 +1182,10 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) return -ENOMEM; ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); - if (ret) + if (ret) { + kfree(vdev->msi_perm); return ret; + } return len; } @@ -1606,6 +1608,15 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) return 0; } +/* + * Nag about hardware bugs, hopefully to have vendors fix them, but at least + * to collect a list of dependencies for the VF INTx pin quirk below. + */ +static const struct pci_device_id known_bogus_vf_intx_pin[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, + {} +}; + /* * For each device we allocate a pci_config_map that indicates the * capability occupying each dword and thus the struct perm_bits we @@ -1671,6 +1682,24 @@ int vfio_config_init(struct vfio_pci_device *vdev) if (pdev->is_virtfn) { *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); + + /* + * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register + * does not apply to VFs and VFs must implement this register + * as read-only with value zero. Userspace is not readily able + * to identify whether a device is a VF and thus that the pin + * definition on the device is bogus should it violate this + * requirement. We already virtualize the pin register for + * other purposes, so we simply need to replace the bogus value + * and consider VFs when we determine INTx IRQ count. 
+ */ + if (vconfig[PCI_INTERRUPT_PIN] && + !pci_match_id(known_bogus_vf_intx_pin, pdev)) + pci_warn(pdev, + "Hardware bug: VF reports bogus INTx pin %d\n", + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c index cd50df5807eadb2b4bf18b6fa8efc906beda0dee..086611c7bc03cf3505bbe7e041dc6419841f30f9 100644 --- a/drivers/video/backlight/lm3639_bl.c +++ b/drivers/video/backlight/lm3639_bl.c @@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client) regmap_write(pchip->regmap, REG_ENABLE, 0x00); - if (&pchip->cdev_torch) - led_classdev_unregister(&pchip->cdev_torch); - if (&pchip->cdev_flash) - led_classdev_unregister(&pchip->cdev_flash); + led_classdev_unregister(&pchip->cdev_torch); + led_classdev_unregister(&pchip->cdev_flash); if (pchip->bled) device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode); return 0; diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 62c0cf79674fec39137fe39552288b26e23773e5..63ec9c71fe84a8b487ea0d8301009298c94fe806 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -997,97 +997,6 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs) DPRINTK("========================================\n"); } -/** - * fb_edid_add_monspecs() - add monitor video modes from E-EDID data - * @edid: 128 byte array with an E-EDID block - * @spacs: monitor specs to be extended - */ -void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) -{ - unsigned char *block; - struct fb_videomode *m; - int num = 0, i; - u8 svd[64], edt[(128 - 4) / DETAILED_TIMING_DESCRIPTION_SIZE]; - u8 pos = 4, svd_n = 0; - - if (!edid) - return; - - if (!edid_checksum(edid)) - return; - - if (edid[0] != 0x2 || - edid[2] < 4 || edid[2] > 128 - DETAILED_TIMING_DESCRIPTION_SIZE) - return; - - DPRINTK(" Short Video Descriptors\n"); - - while (pos < edid[2]) { - u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7; - pr_debug("Data block %u of %u bytes\n", type, len); - if (type == 2) { - for (i = pos; i < pos + len; i++) { - u8 idx = edid[pos + i] & 0x7f; - svd[svd_n++] = idx; - pr_debug("N%sative mode #%d\n", - edid[pos + i] & 0x80 ? "" : "on-n", idx); - } - } else if (type == 3 && len >= 3) { - /* Check Vendor Specific Data Block. For HDMI, - it is always 00-0C-03 for HDMI Licensing, LLC. 
*/ - if (edid[pos + 1] == 3 && edid[pos + 2] == 0xc && - edid[pos + 3] == 0) - specs->misc |= FB_MISC_HDMI; - } - pos += len + 1; - } - - block = edid + edid[2]; - - DPRINTK(" Extended Detailed Timings\n"); - - for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; - i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) - if (PIXEL_CLOCK) - edt[num++] = block - edid; - - /* Yikes, EDID data is totally useless */ - if (!(num + svd_n)) - return; - - m = kzalloc((specs->modedb_len + num + svd_n) * - sizeof(struct fb_videomode), GFP_KERNEL); - - if (!m) - return; - - memcpy(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode)); - - for (i = specs->modedb_len; i < specs->modedb_len + num; i++) { - get_detailed_timing(edid + edt[i - specs->modedb_len], &m[i]); - if (i == specs->modedb_len) - m[i].flag |= FB_MODE_IS_FIRST; - pr_debug("Adding %ux%u@%u\n", m[i].xres, m[i].yres, m[i].refresh); - } - - for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) { - int idx = svd[i - specs->modedb_len - num]; - if (!idx || idx >= ARRAY_SIZE(cea_modes)) { - pr_warning("Reserved SVD code %d\n", idx); - } else if (!cea_modes[idx].xres) { - pr_warning("Unimplemented SVD code %d\n", idx); - } else { - memcpy(&m[i], cea_modes + idx, sizeof(m[i])); - pr_debug("Adding SVD #%d: %ux%u@%u\n", idx, - m[i].xres, m[i].yres, m[i].refresh); - } - } - - kfree(specs->modedb); - specs->modedb = m; - specs->modedb_len = specs->modedb_len + num + svd_n; -} - /* * VESA Generalized Timing Formula (GTF) */ @@ -1497,9 +1406,6 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var) void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs) { } -void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) -{ -} void fb_destroy_modedb(struct fb_videomode *modedb) { } @@ -1607,7 +1513,6 @@ EXPORT_SYMBOL(fb_firmware_edid); EXPORT_SYMBOL(fb_parse_edid); EXPORT_SYMBOL(fb_edid_to_monspecs); -EXPORT_SYMBOL(fb_edid_add_monspecs); EXPORT_SYMBOL(fb_get_mode); EXPORT_SYMBOL(fb_validate_mode); EXPORT_SYMBOL(fb_destroy_modedb); diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 455a15f701720c89374656e55f7427d7d7091790..a9d76e1b437815cd9123269f844c48669408250e 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -289,63 +289,6 @@ static const struct fb_videomode modedb[] = { }; #ifdef CONFIG_FB_MODE_HELPERS -const struct fb_videomode cea_modes[65] = { - /* #1: 640x480p@59.94/60Hz */ - [1] = { - NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, - FB_VMODE_NONINTERLACED, 0, - }, - /* #3: 720x480p@59.94/60Hz */ - [3] = { - NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, - FB_VMODE_NONINTERLACED, 0, - }, - /* #5: 1920x1080i@59.94/60Hz */ - [5] = { - NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_INTERLACED, 0, - }, - /* #7: 720(1440)x480iH@59.94/60Hz */ - [7] = { - NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, - FB_VMODE_INTERLACED, 0, - }, - /* #9: 720(1440)x240pH@59.94/60Hz */ - [9] = { - NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, - FB_VMODE_NONINTERLACED, 0, - }, - /* #18: 720x576pH@50Hz */ - [18] = { - NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, - FB_VMODE_NONINTERLACED, 0, - }, - /* #19: 1280x720p@50Hz */ - [19] = { - NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_NONINTERLACED, 0, - }, - /* #20: 1920x1080i@50Hz */ - 
[20] = { - NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_INTERLACED, 0, - }, - /* #32: 1920x1080p@23.98/24Hz */ - [32] = { - NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5, - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, - FB_VMODE_NONINTERLACED, 0, - }, - /* #35: (2880)x480p4x@59.94/60Hz */ - [35] = { - NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, - FB_VMODE_NONINTERLACED, 0, - }, -}; - const struct fb_videomode vesa_modes[] = { /* 0 640x350-85 VESA */ { NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3, diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index e72a315f73f53d39e3bdca9057f1bd09574e1a4e..5767ca194171db7f10681a52646322ccb65d5fcf 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -600,6 +600,7 @@ struct mdss_mdp_ctl { struct mutex flush_lock; struct mutex *shared_lock; struct mutex rsrc_lock; + struct mutex vsync_handler_lock; spinlock_t spin_lock; struct mdss_panel_data *panel_data; diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c index 029634bd06d3e8ae6e6308b0d26bf95a7dd702f7..75c2b6ffc23c54bdf7d38021e8daef4bc00a3356 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c +++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2452,6 +2452,7 @@ struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata, mutex_init(&ctl->offlock); mutex_init(&ctl->flush_lock); mutex_init(&ctl->rsrc_lock); + mutex_init(&ctl->vsync_handler_lock); spin_lock_init(&ctl->spin_lock); BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head); pr_debug("alloc ctl_num=%d\n", ctl->num); diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c index 1c70637e6191569ebf3c43aca413fd0c8afec4da..9e27bb13c3a4917ea8c3c80ad648f0ea22a79d71 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c @@ -1979,6 +1979,49 @@ static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl, return -ENODEV; } + pr_debug("%pS->%s ctl:%d\n", + __builtin_return_address(0), __func__, ctl->num); + + mutex_lock(&ctl->vsync_handler_lock); + + MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888); + sctl = mdss_mdp_get_split_ctl(ctl); + if (sctl) + sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX]; + + spin_lock_irqsave(&ctx->clk_lock, flags); + if (handle->enabled) { + handle->enabled = false; + list_del_init(&handle->list); + disable_vsync_irq = !handle->cmd_post_flush; + } + spin_unlock_irqrestore(&ctx->clk_lock, flags); + + if (disable_vsync_irq) { + /* disable rd_ptr interrupt and clocks */ + mdss_mdp_setup_vsync(ctx, false); + complete(&ctx->stop_comp); + } + + mutex_unlock(&ctl->vsync_handler_lock); + + return 0; +} + +static int mdss_mdp_cmd_remove_vsync_handler_nolock(struct mdss_mdp_ctl *ctl, + struct mdss_mdp_vsync_handler *handle) +{ + struct mdss_mdp_ctl *sctl; + struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL; + unsigned long flags; + bool disable_vsync_irq = false; + + ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX]; + if (!ctx) { + pr_err("%s: invalid ctx\n", __func__); + return -ENODEV; + } + pr_debug("%pS->%s ctl:%d\n", __builtin_return_address(0), __func__, ctl->num); @@ -3216,8 +3259,12 @@ static int mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl, return -ENODEV; } - list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list) - mdss_mdp_cmd_remove_vsync_handler(ctl, handle); + mutex_lock(&ctl->vsync_handler_lock); + list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list) { + mdss_mdp_cmd_remove_vsync_handler_nolock(ctl, handle); + } + mutex_unlock(&ctl->vsync_handler_lock); + if (mdss_mdp_is_lineptr_supported(ctl)) mdss_mdp_cmd_lineptr_ctrl(ctl, false); MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY); diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index 31c301d6be621aef9f3a35f9bef3beb13eeff0d2..52e161dbd204781ae06a5f31c5bad95b46224b6e 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -105,11 +105,11 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fbtype __user *f = (struct fbtype __user *) arg; if (put_user(type, &f->fb_type) || - __put_user(info->var.yres, &f->fb_height) || - __put_user(info->var.xres, &f->fb_width) || - __put_user(fb_depth, &f->fb_depth) || - __put_user(0, &f->fb_cmsize) || - __put_user(fb_size, &f->fb_cmsize)) + put_user(info->var.yres, &f->fb_height) || + put_user(info->var.xres, &f->fb_width) || + put_user(fb_depth, &f->fb_depth) || + put_user(0, &f->fb_cmsize) || + put_user(fb_size, &f->fb_cmsize)) return -EFAULT; return 0; } @@ -124,10 
+124,10 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned int index, count, i; if (get_user(index, &c->index) || - __get_user(count, &c->count) || - __get_user(ured, &c->red) || - __get_user(ugreen, &c->green) || - __get_user(ublue, &c->blue)) + get_user(count, &c->count) || + get_user(ured, &c->red) || + get_user(ugreen, &c->green) || + get_user(ublue, &c->blue)) return -EFAULT; cmap.len = 1; @@ -164,13 +164,13 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, u8 red, green, blue; if (get_user(index, &c->index) || - __get_user(count, &c->count) || - __get_user(ured, &c->red) || - __get_user(ugreen, &c->green) || - __get_user(ublue, &c->blue)) + get_user(count, &c->count) || + get_user(ured, &c->red) || + get_user(ugreen, &c->green) || + get_user(ublue, &c->blue)) return -EFAULT; - if (index + count > cmap->len) + if (index > cmap->len || count > cmap->len - index) return -EINVAL; for (i = 0; i < count; i++) { diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 2925d5ce8d3e561c1d674cab95be1ce757ed8f39..1267b93c03bd0765dc171bcb3a31284f6e3a190b 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -430,7 +430,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) if (ret < 0) return ret; - ret = ssd1307fb_write_cmd(par->client, 0x0); + ret = ssd1307fb_write_cmd(par->client, par->page_offset); if (ret < 0) return ret; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 2f09294c5946090c5f54e78b5f32ac2c07eeede8..e459cd7302e2739efb5c699c3f402d7ab407aee5 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -427,7 +427,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, kfree(desc); END_USE(vq); - return -EIO; + return -ENOMEM; } /** diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 5098982e1a585f1fbde452b22eb9179f8c0a6b49..e5c162b05376a05a3c234d9cc6bbd049cab79796 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -58,7 +58,7 @@ #define IMX2_WDT_WMCR 0x08 /* Misc Register */ -#define IMX2_WDT_MAX_TIME 128 +#define IMX2_WDT_MAX_TIME 128U #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ #define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8) @@ -183,7 +183,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog, { unsigned int actual; - actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000); + actual = min(new_timeout, IMX2_WDT_MAX_TIME); __imx2_wdt_set_timeout(wdog, actual); wdog->timeout = new_timeout; return 0; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 6af117af978041d9ec2438df5829f6f4e9012993..731cf54f75c6585b40d28b8d398187a00ced2d2c 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -358,7 +358,10 @@ static enum bp_state reserve_additional_memory(void) * callers drop the mutex before trying again. 
*/ mutex_unlock(&balloon_mutex); + /* add_memory_resource() requires the device_hotplug lock */ + lock_device_hotplug(); rc = add_memory_resource(nid, resource, memhp_auto_online); + unlock_device_hotplug(); mutex_lock(&balloon_mutex); if (rc) { diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index 7494dbeb4409c0a6c10fc615f275e552fe3e0807..db58aaa4dc5983ac41b2c6c228440749790f7412 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -29,6 +29,8 @@ #include "../pci/pci.h" #ifdef CONFIG_PCI_MMCONFIG #include + +static int xen_mcfg_late(void); #endif static bool __read_mostly pci_seg_supported = true; @@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev) #ifdef CONFIG_PCI_IOV struct pci_dev *physfn = pci_dev->physfn; #endif - +#ifdef CONFIG_PCI_MMCONFIG + static bool pci_mcfg_reserved = false; + /* + * Reserve MCFG areas in Xen on first invocation due to this being + * potentially called from inside of acpi_init immediately after + * MCFG table has been finally parsed. + */ + if (!pci_mcfg_reserved) { + xen_mcfg_late(); + pci_mcfg_reserved = true; + } +#endif if (pci_seg_supported) { struct { struct physdev_pci_device_add add; @@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void) arch_initcall(register_xen_pci_notifier); #ifdef CONFIG_PCI_MMCONFIG -static int __init xen_mcfg_late(void) +static int xen_mcfg_late(void) { struct pci_mmcfg_region *cfg; int rc; @@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void) } return 0; } -/* - * Needs to be done after acpi_init which are subsys_initcall. - */ -subsys_initcall_sync(xen_mcfg_late); #endif diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 79ff727254bb677a93b61fe2207b4c114e2c89ac..e963b83afc7177723f71d8bb5fee25f8625792f3 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) v9inode = V9FS_I(inode); mutex_lock(&v9inode->v_mutex); if (!v9inode->writeback_fid && + (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) { /* * clone a fid and add it to writeback_fid @@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start - 1), }; + if (!(vma->vm_flags & VM_SHARED)) + return; p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma); diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index afdf4e3cafc2aa5f1c1ff1dc0e8cfb256c6a3b89..37c2093a24d3c5c4b763a6d8dbf666942865643f 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -14,14 +14,31 @@ #include #include +static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } +static inline char *next_non_spacetab(char *first, const char *last) +{ + for (; first <= last; first++) + if (!spacetab(*first)) + return first; + return NULL; +} +static inline char *next_terminator(char *first, const char *last) +{ + for (; first <= last; first++) + if (spacetab(*first) || !*first) + return first; + return NULL; +} + static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; - char *cp; + char *cp, *buf_end; struct file *file; char interp[BINPRM_BUF_SIZE]; int retval; + /* Not ours to exec if we don't start with "#!". */ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) return -ENOEXEC; @@ -34,18 +51,40 @@ static int load_script(struct linux_binprm *bprm) if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) return -ENOENT; - /* - * This section does the #! interpretation. - * Sorta complicated, but hopefully it will work. 
-TYT - */ - + /* Release since we are not mapping a binary into memory. */ allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; - bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; - if ((cp = strchr(bprm->buf, '\n')) == NULL) - cp = bprm->buf+BINPRM_BUF_SIZE-1; + /* + * This section handles parsing the #! line into separate + * interpreter path and argument strings. We must be careful + * because bprm->buf is not yet guaranteed to be NUL-terminated + * (though the buffer will have trailing NUL padding when the + * file size was smaller than the buffer size). + * + * We do not want to exec a truncated interpreter path, so either + * we find a newline (which indicates nothing is truncated), or + * we find a space/tab/NUL after the interpreter path (which + * itself may be preceded by spaces/tabs). Truncating the + * arguments is fine: the interpreter can re-read the script to + * parse them on its own. + */ + buf_end = bprm->buf + sizeof(bprm->buf) - 1; + cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); + if (!cp) { + cp = next_non_spacetab(bprm->buf + 2, buf_end); + if (!cp) + return -ENOEXEC; /* Entire buf is spaces/tabs */ + /* + * If there is no later space/tab/NUL we must assume the + * interpreter path is truncated. + */ + if (!next_terminator(cp, buf_end)) + return -ENOEXEC; + cp = buf_end; + } + /* NUL-terminate the buffer and any trailing spaces/tabs. */ *cp = '\0'; while (cp > bprm->buf) { cp--; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c94d3390cbfc87edc4dd688a741a47770abac291..3faccbf35e9f4c68dca0e626ee48423b8f22a392 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1406,6 +1406,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) struct tree_mod_elem *tm; struct extent_buffer *eb = NULL; struct extent_buffer *eb_root; + u64 eb_root_owner = 0; struct extent_buffer *old; struct tree_mod_root *old_root = NULL; u64 old_generation = 0; @@ -1439,6 +1440,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) free_extent_buffer(old); } } else if (old_root) { + eb_root_owner = btrfs_header_owner(eb_root); btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); eb = alloc_dummy_extent_buffer(root->fs_info, logical, @@ -1457,7 +1459,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); - btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); + btrfs_set_header_owner(eb, eb_root_owner); btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } @@ -2971,6 +2973,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, again: b = get_old_root(root, time_seq); + if (!b) { + ret = -EIO; + goto done; + } level = btrfs_header_level(b); p->locks[level] = BTRFS_READ_LOCK; @@ -5465,6 +5471,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root, advance_left = advance_right = 0; while (1) { + cond_resched(); if (advance_left && !left_end_reached) { ret = tree_advance(left_root, left_path, &left_level, left_root_level, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7938c48c72ff0d3e8651e540f6b19514305f159d..538f378eea52abdb0e7b60a3a34470486f353230 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7571,6 +7571,14 @@ static noinline int find_free_extent(struct btrfs_root *orig_root, */ if ((flags & extra) && !(block_group->flags & extra)) goto loop; + + /* + * This block group has different flags than we want. 
+ * It's possible that we have MIXED_GROUP flag but no + * block group is mixed. Just skip such block group. + */ + btrfs_release_block_group(block_group, delalloc); + continue; } have_block_group: @@ -10317,6 +10325,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) btrfs_err(info, "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", cache->key.objectid); + btrfs_put_block_group(cache); ret = -EINVAL; goto error; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index f25233093d68c40c53c297c9857b4ce224e9c3b8..0355e6d9e21cbcc987283e59dc4a6907b88bc754 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -759,10 +759,10 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans, return ret; } -static int update_qgroup_status_item(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info, - struct btrfs_root *root) +static int update_qgroup_status_item(struct btrfs_trans_handle *trans) { + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *quota_root = fs_info->quota_root; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *l; @@ -778,7 +778,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); + ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); if (ret > 0) ret = -ENOENT; @@ -1863,7 +1863,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans, fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; spin_unlock(&fs_info->qgroup_lock); - ret = update_qgroup_status_item(trans, fs_info, quota_root); + ret = update_qgroup_status_item(trans); if (ret) fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; @@ -2380,9 +2380,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) btrfs_free_path(path); mutex_lock(&fs_info->qgroup_rescan_lock); - if (!btrfs_fs_closing(fs_info)) - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; - if (err > 0 && fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; @@ -2398,16 +2395,30 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) trans = btrfs_start_transaction(fs_info->quota_root, 1); if (IS_ERR(trans)) { err = PTR_ERR(trans); + trans = NULL; btrfs_err(fs_info, "fail to start transaction for status update: %d\n", err); - goto done; } - ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root); - if (ret < 0) { - err = ret; - btrfs_err(fs_info, "fail to update qgroup status: %d", err); + + mutex_lock(&fs_info->qgroup_rescan_lock); + if (!btrfs_fs_closing(fs_info)) + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; + if (trans) { + ret = update_qgroup_status_item(trans); + if (ret < 0) { + err = ret; + btrfs_err(fs_info, "fail to update qgroup status: %d", + err); + } } + fs_info->qgroup_rescan_running = false; + complete_all(&fs_info->qgroup_rescan_completion); + mutex_unlock(&fs_info->qgroup_rescan_lock); + + if (!trans) + return; + btrfs_end_transaction(trans, fs_info->quota_root); if (btrfs_fs_closing(fs_info)) { @@ -2418,12 +2429,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) } else { btrfs_err(fs_info, "qgroup scan failed with %d", err); } - -done: - mutex_lock(&fs_info->qgroup_rescan_lock); - fs_info->qgroup_rescan_running = false; - mutex_unlock(&fs_info->qgroup_rescan_lock); - complete_all(&fs_info->qgroup_rescan_completion); } /* diff --git a/fs/btrfs/tree-log.c 
b/fs/btrfs/tree-log.c index 02bb7b52cb3631e6b766a93b80765f823381fb3c..65e1eaa5df8422bf4019d7f2c65923ec54526fef 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4846,7 +4846,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, err = btrfs_log_inode(trans, root, other_inode, LOG_OTHER_INODE, 0, LLONG_MAX, ctx); - iput(other_inode); + btrfs_add_delayed_iput(other_inode); if (err) goto out_unlock; else @@ -5264,7 +5264,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, } if (btrfs_inode_in_log(di_inode, trans->transid)) { - iput(di_inode); + btrfs_add_delayed_iput(di_inode); break; } @@ -5276,7 +5276,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, if (!ret && btrfs_must_commit_transaction(trans, di_inode)) ret = 1; - iput(di_inode); + btrfs_add_delayed_iput(di_inode); if (ret) goto next_dir_inode; if (ctx->log_new_dentries) { @@ -5422,7 +5422,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, if (!ret && ctx && ctx->log_new_dentries) ret = log_new_dir_dentries(trans, root, dir_inode, ctx); - iput(dir_inode); + btrfs_add_delayed_iput(dir_inode); if (ret) goto out; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 82df349b84f7984bb8cbf84fe3cf330dfe3dbbd3..f5d9835264aa16f863b2fbd2021e620486f2fcf4 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -933,6 +933,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + /* remove from inode's cap rbtree, and clear auth cap */ + rb_erase(&cap->ci_node, &ci->i_caps); + if (ci->i_auth_cap == cap) + ci->i_auth_cap = NULL; + /* remove from session list */ spin_lock(&session->s_cap_lock); if (session->s_cap_iterator == cap) { @@ -968,11 +973,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) spin_unlock(&session->s_cap_lock); - /* remove from inode list */ - rb_erase(&cap->ci_node, &ci->i_caps); - if (ci->i_auth_cap == cap) - ci->i_auth_cap = NULL; - if (removed) ceph_put_cap(mdsc, cap); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 339fdf6355df71bfe5234ffe3c42988bb93c9b88..049cff197d2a11f65507156348eed5986c536312 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -800,7 +800,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page, ci->i_version = le64_to_cpu(info->version); inode->i_version++; inode->i_rdev = le32_to_cpu(info->rdev); - inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; + /* directories have fl_stripe_unit set to zero */ + if (le32_to_cpu(info->layout.fl_stripe_unit)) + inode->i_blkbits = + fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; + else + inode->i_blkbits = CEPH_BLOCK_SHIFT; if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) && (issued & CEPH_CAP_AUTH_EXCL) == 0) { @@ -1625,7 +1630,6 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, if (IS_ERR(realdn)) { err = PTR_ERR(realdn); d_drop(dn); - dn = NULL; goto next_item; } dn = realdn; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 67cb9d078bfa749269fae629cbeda1f6f0db6d32..3139fbd4c34e3b835ec39e666cc030375ad660fa 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3410,7 +3410,9 @@ static void delayed_work(struct work_struct *work) pr_info("mds%d hung\n", s->s_mds); } } - if (s->s_state < CEPH_MDS_SESSION_OPEN) { + if (s->s_state == CEPH_MDS_SESSION_NEW || + s->s_state == CEPH_MDS_SESSION_RESTARTING || + s->s_state == CEPH_MDS_SESSION_REJECTED) { /* this mds is failed or recovering, just wait */ 
ceph_put_mds_session(s); continue; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 5367b684c1f7415ce09caa7fc8531094b281b3ad..7ae21ad420fbfb40e89a7a12ca9184a869149b4e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1178,6 +1178,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); struct cifsInodeInfo { bool can_cache_brlcks; struct list_head llist; /* locks helb by this inode */ + /* + * NOTE: Some code paths call down_read(lock_sem) twice, so + * we must always use use cifs_down_write() instead of down_write() + * for this semaphore to avoid deadlocks. + */ struct rw_semaphore lock_sem; /* protect the fields above */ /* BB add in lists for dirty pages i.e. write caching info for oplock */ struct list_head openFileList; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index cd8025a249bba990170e23d9ae6406e134ebdff2..cdf244df91c255ace89ada9645a0fab98d36e679 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -138,6 +138,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, const unsigned int xid); extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile); +extern void cifs_down_write(struct rw_semaphore *sem); extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 4b350acc1187724a4e1fdd70349288bff0ee4aac..d4f8bb315c61283efff797a6281863566e35b622 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2447,6 +2447,7 @@ static int cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) { int rc = 0; + int is_domain = 0; const char *delim, *payload; char *desc; ssize_t len; @@ -2494,6 +2495,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) rc = PTR_ERR(key); goto out_err; } + is_domain = 1; } down_read(&key->sem); @@ -2551,6 +2553,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) goto out_key_put; } + /* + * If we have a domain key then we must set the domainName in the + * for the request. + */ + if (is_domain && ses->domainName) { + vol->domainname = kstrndup(ses->domainName, + strlen(ses->domainName), + GFP_KERNEL); + if (!vol->domainname) { + cifs_dbg(FYI, "Unable to allocate %zd bytes for " + "domain\n", len); + rc = -ENOMEM; + kfree(vol->username); + vol->username = NULL; + kzfree(vol->password); + vol->password = NULL; + goto out_key_put; + } + } + out_key_put: up_read(&key->sem); key_put(key); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index e98e24eaa6a878b6d9a69b2112a6140605824745..d6475dcce9dfbd7a377c35a7dd832e3b658cdb5a 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -830,10 +830,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, static int cifs_d_revalidate(struct dentry *direntry, unsigned int flags) { + struct inode *inode; + if (flags & LOOKUP_RCU) return -ECHILD; if (d_really_is_positive(direntry)) { + inode = d_inode(direntry); + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + if (cifs_revalidate_dentry(direntry)) return 0; else { @@ -844,7 +850,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) * attributes will have been updated by * cifs_revalidate_dentry(). 
*/ - if (IS_AUTOMOUNT(d_inode(direntry)) && + if (IS_AUTOMOUNT(inode) && !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) { spin_lock(&direntry->d_lock); direntry->d_flags |= DCACHE_NEED_AUTOMOUNT; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index df3ee0b6264fe208f860ba362d86353ef15d40ed..1c3f262d9c4d4643693b8287e6538381d849cff3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -252,6 +252,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, xid, fid); + if (rc) { + server->ops->close(xid, tcon, fid); + if (rc == -ESTALE) + rc = -EOPENSTALE; + } + out: kfree(buf); return rc; @@ -274,6 +280,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode) return has_locks; } +void +cifs_down_write(struct rw_semaphore *sem) +{ + while (!down_write_trylock(sem)) + msleep(10); +} + struct cifsFileInfo * cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) @@ -299,7 +312,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, INIT_LIST_HEAD(&fdlocks->locks); fdlocks->cfile = cfile; cfile->llist = fdlocks; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_add(&fdlocks->llist, &cinode->llist); up_write(&cinode->lock_sem); @@ -451,7 +464,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) * Delete any outstanding lock records. We'll lose them when the file * is closed anyway. */ - down_write(&cifsi->lock_sem); + cifs_down_write(&cifsi->lock_sem); list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { list_del(&li->llist); cifs_del_lock_waiters(li); @@ -1005,7 +1018,7 @@ static void cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) { struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_add_tail(&lock->llist, &cfile->llist->locks); up_write(&cinode->lock_sem); } @@ -1027,7 +1040,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, try_again: exist = false; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, lock->type, &conf_lock, CIFS_LOCK_OP); @@ -1049,7 +1062,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, (lock->blist.next == &lock->blist)); if (!rc) goto try_again; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_del_init(&lock->blist); } @@ -1102,7 +1115,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock) return rc; try_again: - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; @@ -1306,7 +1319,7 @@ cifs_push_locks(struct cifsFileInfo *cfile) int rc = 0; /* we are going to update can_cache_brlcks here - need a write access */ - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); return rc; @@ -1495,7 +1508,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, if (!buf) return -ENOMEM; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); for (i = 0; i < 2; i++) { cur = buf; num = 0; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 786f67bee43af5bf6da842410f69f247feba6a15..b1c0961e6b3f2086a135e87a868a5b827dc85b09 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -405,6 +405,7 @@ int 
cifs_get_inode_info_unix(struct inode **pinode, /* if uniqueid is different, return error */ if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM && CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) { + CIFS_I(*pinode)->time = 0; /* force reval */ rc = -ESTALE; goto cgiiu_exit; } @@ -412,6 +413,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, /* if filetype is different, return error */ if (unlikely(((*pinode)->i_mode & S_IFMT) != (fattr.cf_mode & S_IFMT))) { + CIFS_I(*pinode)->time = 0; /* force reval */ rc = -ESTALE; goto cgiiu_exit; } @@ -917,6 +919,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, /* if uniqueid is different, return error */ if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM && CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) { + CIFS_I(*inode)->time = 0; /* force reval */ rc = -ESTALE; goto cgii_exit; } @@ -924,6 +927,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, /* if filetype is different, return error */ if (unlikely(((*inode)->i_mode & S_IFMT) != (fattr.cf_mode & S_IFMT))) { + CIFS_I(*inode)->time = 0; /* force reval */ rc = -ESTALE; goto cgii_exit; } diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index cc88f4f0325ef4003bd248c36ca96865ff1b4ddd..bed97333022798df9f759c1c9d9ef9ebeb1ccbda 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -130,10 +130,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = { {0, 0} }; -static const struct smb_to_posix_error mapping_table_ERRHRD[] = { - {0, 0} -}; - /* * Convert a string containing text IPv4 or IPv6 address to binary form. * diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index f7a9adab0b84fcc9891733dcb29303cefdebaf21..6f5d78b172bac2edcbe75f3c1e913d36197a5d8e 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -180,6 +180,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server) /* we do not want to loop forever */ last_mid = cur_mid; cur_mid++; + /* avoid 0xFFFF MID */ + if (cur_mid == 0xffff) + cur_mid++; /* * This nested loop looks more expensive than it is. 
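The cifs_down_write() helper introduced in the fs/cifs hunks above replaces a blocking down_write() with a try-lock-and-sleep loop, so the writer never parks in the semaphore's wait queue and the code paths that (per the lock_sem NOTE in cifsglob.h) take down_read(lock_sem) twice can no longer deadlock behind a queued writer. A minimal user-space sketch of that pattern follows, assuming a pthread rwlock as a stand-in for the kernel rw_semaphore; demo_down_write is a hypothetical name for illustration and is not part of the patch.

    #include <pthread.h>
    #include <unistd.h>

    /*
     * Illustration only, not kernel code: the same "try, back off,
     * retry" idea as the cifs_down_write() helper above.  Because the
     * writer never sits in the lock's wait queue, a thread that
     * legitimately takes the read lock twice cannot deadlock against
     * a pending writer.
     */
    void demo_down_write(pthread_rwlock_t *sem)
    {
            while (pthread_rwlock_trywrlock(sem) != 0)
                    usleep(10 * 1000);      /* back off ~10 ms, like msleep(10) */
    }
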
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index dee5250701deb3275ef281e11b4892ebbdfab69e..41f1a5dd33a53ade4cb4cb43d0bf92c167fcb6b8 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -138,7 +138,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, cur = buf; - down_write(&cinode->lock_sem); + cifs_down_write(&cinode->lock_sem); list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset || (flock->fl_start + length) < diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index f7ad2a3677be4dbc0047fcb1666830bcc7bd8323..67d9b7a277a3bcb81d0224876e8163bcb44610ce 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1419,6 +1419,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) return; + /* Check if the server granted an oplock rather than a lease */ + if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE) + return smb2_set_oplock_level(cinode, oplock, epoch, + purge_cache); + if (oplock & SMB2_LEASE_READ_CACHING_HE) { new_oplock |= CIFS_CACHE_READ_FLG; strcat(message, "R"); diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 20af5187ba63cc14150185952a3f7cdf9526556d..6634ad3567e0b0ff3277cf0d82e46f0d394e2ba6 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -31,7 +31,7 @@ #include "cifs_fs_sb.h" #include "cifs_unicode.h" -#define MAX_EA_VALUE_SIZE 65535 +#define MAX_EA_VALUE_SIZE CIFSMaxBufSize #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */ diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 4b7da4409c60c5d3e99dadee2b1d0d0a1db88cae..5b832e83772a691698d1a5aae4e916a1f391d0e2 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -160,6 +160,7 @@ struct compat_video_event { unsigned int frame_rate; } u; }; +#define VIDEO_GET_EVENT32 _IOR('o', 28, struct compat_video_event) static int do_video_get_event(struct file *file, unsigned int cmd, struct compat_video_event __user *up) @@ -171,7 +172,7 @@ static int do_video_get_event(struct file *file, if (kevent == NULL) return -EFAULT; - err = do_ioctl(file, cmd, (unsigned long)kevent); + err = do_ioctl(file, VIDEO_GET_EVENT, (unsigned long)kevent); if (!err) { err = convert_in_user(&kevent->type, &up->type); err |= convert_in_user(&kevent->timestamp, &up->timestamp); @@ -190,6 +191,7 @@ struct compat_video_still_picture { compat_uptr_t iFrame; int32_t size; }; +#define VIDEO_STILLPICTURE32 _IOW('o', 30, struct compat_video_still_picture) static int do_video_stillpicture(struct file *file, unsigned int cmd, struct compat_video_still_picture __user *up) @@ -212,7 +214,7 @@ static int do_video_stillpicture(struct file *file, if (err) return -EFAULT; - err = do_ioctl(file, cmd, (unsigned long) up_native); + err = do_ioctl(file, VIDEO_STILLPICTURE, (unsigned long) up_native); return err; } @@ -1484,9 +1486,9 @@ static long do_ioctl_trans(unsigned int cmd, return rtc_ioctl(file, cmd, argp); /* dvb */ - case VIDEO_GET_EVENT: + case VIDEO_GET_EVENT32: return do_video_get_event(file, cmd, argp); - case VIDEO_STILLPICTURE: + case VIDEO_STILLPICTURE32: return do_video_stillpicture(file, cmd, argp); case VIDEO_SET_SPU_PALETTE: return do_video_set_spu_palette(file, cmd, argp); diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index ccc31fa6f1a7e13cbcb6f9b0af501fbe52bd971f..16eb59adf5aab96b1d38bd9ba0a399df8eac9698 100644 --- 
a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -34,6 +34,15 @@ #include #include +struct configfs_fragment { + atomic_t frag_count; + struct rw_semaphore frag_sem; + bool frag_dead; +}; + +void put_fragment(struct configfs_fragment *); +struct configfs_fragment *get_fragment(struct configfs_fragment *); + struct configfs_dirent { atomic_t s_count; int s_dependent_count; @@ -48,6 +57,7 @@ struct configfs_dirent { #ifdef CONFIG_LOCKDEP int s_depth; #endif + struct configfs_fragment *s_frag; }; #define CONFIGFS_ROOT 0x0001 @@ -75,8 +85,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); extern int configfs_create_bin_file(struct config_item *, const struct configfs_bin_attribute *); -extern int configfs_make_dirent(struct configfs_dirent *, - struct dentry *, void *, umode_t, int); +extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *, + void *, umode_t, int, struct configfs_fragment *); extern int configfs_dirent_is_ready(struct configfs_dirent *); extern void configfs_hash_and_remove(struct dentry * dir, const char * name); @@ -151,6 +161,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd) { if (!(sd->s_type & CONFIGFS_ROOT)) { kfree(sd->s_iattr); + put_fragment(sd->s_frag); kmem_cache_free(configfs_dir_cachep, sd); } } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index a1985a9ad2d640f07c4663de421c36aff3fab8f2..c2ef617d2f97db5154f12bf023fc6775abd3331a 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -164,11 +164,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd) #endif /* CONFIG_LOCKDEP */ +static struct configfs_fragment *new_fragment(void) +{ + struct configfs_fragment *p; + + p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL); + if (p) { + atomic_set(&p->frag_count, 1); + init_rwsem(&p->frag_sem); + p->frag_dead = false; + } + return p; +} + +void put_fragment(struct configfs_fragment *frag) +{ + if (frag && atomic_dec_and_test(&frag->frag_count)) + kfree(frag); +} + +struct configfs_fragment *get_fragment(struct configfs_fragment *frag) +{ + if (likely(frag)) + atomic_inc(&frag->frag_count); + return frag; +} + /* * Allocates a new configfs_dirent and links it to the parent configfs_dirent */ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd, - void *element, int type) + void *element, int type, + struct configfs_fragment *frag) { struct configfs_dirent * sd; @@ -188,6 +215,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren kmem_cache_free(configfs_dir_cachep, sd); return ERR_PTR(-ENOENT); } + sd->s_frag = get_fragment(frag); list_add(&sd->s_sibling, &parent_sd->s_children); spin_unlock(&configfs_dirent_lock); @@ -222,11 +250,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd, int configfs_make_dirent(struct configfs_dirent * parent_sd, struct dentry * dentry, void * element, - umode_t mode, int type) + umode_t mode, int type, struct configfs_fragment *frag) { struct configfs_dirent * sd; - sd = configfs_new_dirent(parent_sd, element, type); + sd = configfs_new_dirent(parent_sd, element, type, frag); if (IS_ERR(sd)) return PTR_ERR(sd); @@ -273,7 +301,8 @@ static void init_symlink(struct inode * inode) * until it is validated by configfs_dir_set_ready() */ -static int configfs_create_dir(struct config_item *item, struct dentry *dentry) +static int 
configfs_create_dir(struct config_item *item, struct dentry *dentry, + struct configfs_fragment *frag) { int error; umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; @@ -286,7 +315,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry) return error; error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, - CONFIGFS_DIR | CONFIGFS_USET_CREATING); + CONFIGFS_DIR | CONFIGFS_USET_CREATING, + frag); if (unlikely(error)) return error; @@ -351,9 +381,10 @@ int configfs_create_link(struct configfs_symlink *sl, { int err = 0; umode_t mode = S_IFLNK | S_IRWXUGO; + struct configfs_dirent *p = parent->d_fsdata; - err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode, - CONFIGFS_ITEM_LINK); + err = configfs_make_dirent(p, dentry, sl, mode, + CONFIGFS_ITEM_LINK, p->s_frag); if (!err) { err = configfs_create(dentry, mode, init_symlink); if (err) { @@ -612,7 +643,8 @@ static int populate_attrs(struct config_item *item) static int configfs_attach_group(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry); + struct dentry *dentry, + struct configfs_fragment *frag); static void configfs_detach_group(struct config_item *item); static void detach_groups(struct config_group *group) @@ -660,7 +692,8 @@ static void detach_groups(struct config_group *group) * try using vfs_mkdir. Just a thought. */ static int create_default_group(struct config_group *parent_group, - struct config_group *group) + struct config_group *group, + struct configfs_fragment *frag) { int ret; struct configfs_dirent *sd; @@ -676,7 +709,7 @@ static int create_default_group(struct config_group *parent_group, d_add(child, NULL); ret = configfs_attach_group(&parent_group->cg_item, - &group->cg_item, child); + &group->cg_item, child, frag); if (!ret) { sd = child->d_fsdata; sd->s_type |= CONFIGFS_USET_DEFAULT; @@ -690,13 +723,14 @@ static int create_default_group(struct config_group *parent_group, return ret; } -static int populate_groups(struct config_group *group) +static int populate_groups(struct config_group *group, + struct configfs_fragment *frag) { struct config_group *new_group; int ret = 0; list_for_each_entry(new_group, &group->default_groups, group_entry) { - ret = create_default_group(group, new_group); + ret = create_default_group(group, new_group, frag); if (ret) { detach_groups(group); break; @@ -810,11 +844,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g */ static int configfs_attach_item(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry) + struct dentry *dentry, + struct configfs_fragment *frag) { int ret; - ret = configfs_create_dir(item, dentry); + ret = configfs_create_dir(item, dentry, frag); if (!ret) { ret = populate_attrs(item); if (ret) { @@ -844,12 +879,13 @@ static void configfs_detach_item(struct config_item *item) static int configfs_attach_group(struct config_item *parent_item, struct config_item *item, - struct dentry *dentry) + struct dentry *dentry, + struct configfs_fragment *frag) { int ret; struct configfs_dirent *sd; - ret = configfs_attach_item(parent_item, item, dentry); + ret = configfs_attach_item(parent_item, item, dentry, frag); if (!ret) { sd = dentry->d_fsdata; sd->s_type |= CONFIGFS_USET_DIR; @@ -865,7 +901,7 @@ static int configfs_attach_group(struct config_item *parent_item, */ inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); configfs_adjust_dir_dirent_depth_before_populate(sd); - ret = populate_groups(to_config_group(item)); + ret = 
populate_groups(to_config_group(item), frag); if (ret) { configfs_detach_item(item); d_inode(dentry)->i_flags |= S_DEAD; @@ -1260,6 +1296,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode struct configfs_dirent *sd; struct config_item_type *type; struct module *subsys_owner = NULL, *new_item_owner = NULL; + struct configfs_fragment *frag; char *name; sd = dentry->d_parent->d_fsdata; @@ -1278,6 +1315,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode goto out; } + frag = new_fragment(); + if (!frag) { + ret = -ENOMEM; + goto out; + } + /* Get a working ref for the duration of this function */ parent_item = configfs_get_config_item(dentry->d_parent); type = parent_item->ci_type; @@ -1380,9 +1423,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode spin_unlock(&configfs_dirent_lock); if (group) - ret = configfs_attach_group(parent_item, item, dentry); + ret = configfs_attach_group(parent_item, item, dentry, frag); else - ret = configfs_attach_item(parent_item, item, dentry); + ret = configfs_attach_item(parent_item, item, dentry, frag); spin_lock(&configfs_dirent_lock); sd->s_type &= ~CONFIGFS_USET_IN_MKDIR; @@ -1419,6 +1462,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode * reference. */ config_item_put(parent_item); + put_fragment(frag); out: return ret; @@ -1430,6 +1474,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) struct config_item *item; struct configfs_subsystem *subsys; struct configfs_dirent *sd; + struct configfs_fragment *frag; struct module *subsys_owner = NULL, *dead_item_owner = NULL; int ret; @@ -1487,6 +1532,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) } } while (ret == -EAGAIN); + frag = sd->s_frag; + if (down_write_killable(&frag->frag_sem)) { + spin_lock(&configfs_dirent_lock); + configfs_detach_rollback(dentry); + spin_unlock(&configfs_dirent_lock); + return -EINTR; + } + frag->frag_dead = true; + up_write(&frag->frag_sem); + /* Get a working ref for the duration of this function */ item = configfs_get_config_item(dentry); @@ -1587,7 +1642,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file) */ err = -ENOENT; if (configfs_dirent_is_ready(parent_sd)) { - file->private_data = configfs_new_dirent(parent_sd, NULL, 0); + file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL); if (IS_ERR(file->private_data)) err = PTR_ERR(file->private_data); else @@ -1743,8 +1798,13 @@ int configfs_register_group(struct config_group *parent_group, { struct configfs_subsystem *subsys = parent_group->cg_subsys; struct dentry *parent; + struct configfs_fragment *frag; int ret; + frag = new_fragment(); + if (!frag) + return -ENOMEM; + mutex_lock(&subsys->su_mutex); link_group(parent_group, group); mutex_unlock(&subsys->su_mutex); @@ -1752,7 +1812,7 @@ int configfs_register_group(struct config_group *parent_group, parent = parent_group->cg_item.ci_dentry; inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); - ret = create_default_group(parent_group, group); + ret = create_default_group(parent_group, group, frag); if (ret) goto err_out; @@ -1760,12 +1820,14 @@ int configfs_register_group(struct config_group *parent_group, configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); spin_unlock(&configfs_dirent_lock); inode_unlock(d_inode(parent)); + put_fragment(frag); return 0; err_out: inode_unlock(d_inode(parent)); mutex_lock(&subsys->su_mutex); unlink_group(group); 
mutex_unlock(&subsys->su_mutex); + put_fragment(frag); return ret; } EXPORT_SYMBOL(configfs_register_group); @@ -1781,16 +1843,12 @@ void configfs_unregister_group(struct config_group *group) struct configfs_subsystem *subsys = group->cg_subsys; struct dentry *dentry = group->cg_item.ci_dentry; struct dentry *parent = group->cg_item.ci_parent->ci_dentry; + struct configfs_dirent *sd = dentry->d_fsdata; + struct configfs_fragment *frag = sd->s_frag; - mutex_lock(&subsys->su_mutex); - if (!group->cg_item.ci_parent->ci_group) { - /* - * The parent has already been unlinked and detached - * due to a rmdir. - */ - goto unlink_group; - } - mutex_unlock(&subsys->su_mutex); + down_write(&frag->frag_sem); + frag->frag_dead = true; + up_write(&frag->frag_sem); inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); spin_lock(&configfs_dirent_lock); @@ -1806,7 +1864,6 @@ void configfs_unregister_group(struct config_group *group) dput(dentry); mutex_lock(&subsys->su_mutex); -unlink_group: unlink_group(group); mutex_unlock(&subsys->su_mutex); } @@ -1863,10 +1920,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) struct dentry *dentry; struct dentry *root; struct configfs_dirent *sd; + struct configfs_fragment *frag; + + frag = new_fragment(); + if (!frag) + return -ENOMEM; root = configfs_pin_fs(); - if (IS_ERR(root)) + if (IS_ERR(root)) { + put_fragment(frag); return PTR_ERR(root); + } if (!group->cg_item.ci_name) group->cg_item.ci_name = group->cg_item.ci_namebuf; @@ -1882,7 +1946,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) d_add(dentry, NULL); err = configfs_attach_group(sd->s_element, &group->cg_item, - dentry); + dentry, frag); if (err) { BUG_ON(d_inode(dentry)); d_drop(dentry); @@ -1900,6 +1964,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) unlink_group(group); configfs_release_fs(); } + put_fragment(frag); return err; } @@ -1909,12 +1974,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) struct config_group *group = &subsys->su_group; struct dentry *dentry = group->cg_item.ci_dentry; struct dentry *root = dentry->d_sb->s_root; + struct configfs_dirent *sd = dentry->d_fsdata; + struct configfs_fragment *frag = sd->s_frag; if (dentry->d_parent != root) { pr_err("Tried to unregister non-subsystem!\n"); return; } + down_write(&frag->frag_sem); + frag->frag_dead = true; + up_write(&frag->frag_sem); + inode_lock_nested(d_inode(root), I_MUTEX_PARENT); inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 2c6312db85168050f6ee35d0e82465d994e7a1ea..7285440bc62e83049dab488fabe0ad6dc096e38f 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -53,40 +53,44 @@ struct configfs_buffer { bool write_in_progress; char *bin_buffer; int bin_buffer_size; + int cb_max_size; + struct config_item *item; + struct module *owner; + union { + struct configfs_attribute *attr; + struct configfs_bin_attribute *bin_attr; + }; }; +static inline struct configfs_fragment *to_frag(struct file *file) +{ + struct configfs_dirent *sd = file->f_path.dentry->d_fsdata; -/** - * fill_read_buffer - allocate and fill buffer from item. - * @dentry: dentry pointer. - * @buffer: data buffer for file. - * - * Allocate @buffer->page, if it hasn't been already, then call the - * config_item's show() method to fill the buffer with this attribute's - * data. - * This is called only once, on the file's first read. 
- */ -static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer) + return sd->s_frag; +} + +static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer) { - struct configfs_attribute * attr = to_attr(dentry); - struct config_item * item = to_item(dentry->d_parent); - int ret = 0; - ssize_t count; + struct configfs_fragment *frag = to_frag(file); + ssize_t count = -ENOENT; if (!buffer->page) buffer->page = (char *) get_zeroed_page(GFP_KERNEL); if (!buffer->page) return -ENOMEM; - count = attr->show(item, buffer->page); - - BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE); - if (count >= 0) { - buffer->needs_read_fill = 0; - buffer->count = count; - } else - ret = count; - return ret; + down_read(&frag->frag_sem); + if (!frag->frag_dead) + count = buffer->attr->show(buffer->item, buffer->page); + up_read(&frag->frag_sem); + + if (count < 0) + return count; + if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE)) + return -EIO; + buffer->needs_read_fill = 0; + buffer->count = count; + return 0; } /** @@ -111,12 +115,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf static ssize_t configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct configfs_buffer * buffer = file->private_data; + struct configfs_buffer *buffer = file->private_data; ssize_t retval = 0; mutex_lock(&buffer->mutex); if (buffer->needs_read_fill) { - if ((retval = fill_read_buffer(file->f_path.dentry,buffer))) + retval = fill_read_buffer(file, buffer); + if (retval) goto out; } pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n", @@ -152,10 +157,8 @@ static ssize_t configfs_read_bin_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { + struct configfs_fragment *frag = to_frag(file); struct configfs_buffer *buffer = file->private_data; - struct dentry *dentry = file->f_path.dentry; - struct config_item *item = to_item(dentry->d_parent); - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); ssize_t retval = 0; ssize_t len = min_t(size_t, count, PAGE_SIZE); @@ -166,18 +169,23 @@ configfs_read_bin_file(struct file *file, char __user *buf, retval = -ETXTBSY; goto out; } - buffer->read_in_progress = 1; + buffer->read_in_progress = true; if (buffer->needs_read_fill) { /* perform first read with buf == NULL to get extent */ - len = bin_attr->read(item, NULL, 0); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + len = buffer->bin_attr->read(buffer->item, NULL, 0); + else + len = -ENOENT; + up_read(&frag->frag_sem); if (len <= 0) { retval = len; goto out; } /* do not exceed the maximum value */ - if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) { + if (buffer->cb_max_size && len > buffer->cb_max_size) { retval = -EFBIG; goto out; } @@ -190,7 +198,13 @@ configfs_read_bin_file(struct file *file, char __user *buf, buffer->bin_buffer_size = len; /* perform second read to fill buffer */ - len = bin_attr->read(item, buffer->bin_buffer, len); + down_read(&frag->frag_sem); + if (!frag->frag_dead) + len = buffer->bin_attr->read(buffer->item, + buffer->bin_buffer, len); + else + len = -ENOENT; + up_read(&frag->frag_sem); if (len < 0) { retval = len; vfree(buffer->bin_buffer); @@ -240,25 +254,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size return error ? -EFAULT : count; } - -/** - * flush_write_buffer - push buffer to config_item. - * @dentry: dentry to the attribute - * @buffer: data buffer for file. 
- * @count: number of bytes - * - * Get the correct pointers for the config_item and the attribute we're - * dealing with, then call the store() method for the attribute, - * passing the buffer that we acquired in fill_write_buffer(). - */ - static int -flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count) +flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count) { - struct configfs_attribute * attr = to_attr(dentry); - struct config_item * item = to_item(dentry->d_parent); - - return attr->store(item, buffer->page, count); + struct configfs_fragment *frag = to_frag(file); + int res = -ENOENT; + + down_read(&frag->frag_sem); + if (!frag->frag_dead) + res = buffer->attr->store(buffer->item, buffer->page, count); + up_read(&frag->frag_sem); + return res; } @@ -282,13 +288,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size static ssize_t configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct configfs_buffer * buffer = file->private_data; + struct configfs_buffer *buffer = file->private_data; ssize_t len; mutex_lock(&buffer->mutex); len = fill_write_buffer(buffer, buf, count); if (len > 0) - len = flush_write_buffer(file->f_path.dentry, buffer, len); + len = flush_write_buffer(file, buffer, len); if (len > 0) *ppos += len; mutex_unlock(&buffer->mutex); @@ -313,8 +319,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct configfs_buffer *buffer = file->private_data; - struct dentry *dentry = file->f_path.dentry; - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); void *tbuf = NULL; ssize_t len; @@ -325,13 +329,13 @@ configfs_write_bin_file(struct file *file, const char __user *buf, len = -ETXTBSY; goto out; } - buffer->write_in_progress = 1; + buffer->write_in_progress = true; /* buffer grows? 
*/ if (*ppos + count > buffer->bin_buffer_size) { - if (bin_attr->cb_max_size && - *ppos + count > bin_attr->cb_max_size) { + if (buffer->cb_max_size && + *ppos + count > buffer->cb_max_size) { len = -EFBIG; goto out; } @@ -363,31 +367,51 @@ configfs_write_bin_file(struct file *file, const char __user *buf, return len; } -static int check_perm(struct inode * inode, struct file * file, int type) +static int __configfs_open_file(struct inode *inode, struct file *file, int type) { - struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent); - struct configfs_attribute * attr = to_attr(file->f_path.dentry); - struct configfs_bin_attribute *bin_attr = NULL; - struct configfs_buffer * buffer; - struct configfs_item_operations * ops = NULL; - int error = 0; + struct dentry *dentry = file->f_path.dentry; + struct configfs_fragment *frag = to_frag(file); + struct configfs_attribute *attr; + struct configfs_buffer *buffer; + int error; - if (!item || !attr) - goto Einval; + error = -ENOMEM; + buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL); + if (!buffer) + goto out; - if (type & CONFIGFS_ITEM_BIN_ATTR) - bin_attr = to_bin_attr(file->f_path.dentry); + error = -ENOENT; + down_read(&frag->frag_sem); + if (unlikely(frag->frag_dead)) + goto out_free_buffer; - /* Grab the module reference for this attribute if we have one */ - if (!try_module_get(attr->ca_owner)) { - error = -ENODEV; - goto Done; + error = -EINVAL; + buffer->item = to_item(dentry->d_parent); + if (!buffer->item) + goto out_free_buffer; + + attr = to_attr(dentry); + if (!attr) + goto out_put_item; + + if (type & CONFIGFS_ITEM_BIN_ATTR) { + buffer->bin_attr = to_bin_attr(dentry); + buffer->cb_max_size = buffer->bin_attr->cb_max_size; + } else { + buffer->attr = attr; } - if (item->ci_type) - ops = item->ci_type->ct_item_ops; - else - goto Eaccess; + buffer->owner = attr->ca_owner; + /* Grab the module reference for this attribute if we have one */ + error = -ENODEV; + if (!try_module_get(buffer->owner)) + goto out_put_item; + + error = -EACCES; + if (!buffer->item->ci_type) + goto out_put_module; + + buffer->ops = buffer->item->ci_type->ct_item_ops; /* File needs write support. * The inode's perms must say it's ok, @@ -395,13 +419,11 @@ static int check_perm(struct inode * inode, struct file * file, int type) */ if (file->f_mode & FMODE_WRITE) { if (!(inode->i_mode & S_IWUGO)) - goto Eaccess; - + goto out_put_module; if ((type & CONFIGFS_ITEM_ATTR) && !attr->store) - goto Eaccess; - - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write) - goto Eaccess; + goto out_put_module; + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write) + goto out_put_module; } /* File needs read support. @@ -410,92 +432,72 @@ static int check_perm(struct inode * inode, struct file * file, int type) */ if (file->f_mode & FMODE_READ) { if (!(inode->i_mode & S_IRUGO)) - goto Eaccess; - + goto out_put_module; if ((type & CONFIGFS_ITEM_ATTR) && !attr->show) - goto Eaccess; - - if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read) - goto Eaccess; + goto out_put_module; + if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read) + goto out_put_module; } - /* No error? Great, allocate a buffer for the file, and store it - * it in file->private_data for easy access. 
- */ - buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL); - if (!buffer) { - error = -ENOMEM; - goto Enomem; - } mutex_init(&buffer->mutex); buffer->needs_read_fill = 1; - buffer->read_in_progress = 0; - buffer->write_in_progress = 0; - buffer->ops = ops; + buffer->read_in_progress = false; + buffer->write_in_progress = false; file->private_data = buffer; - goto Done; + up_read(&frag->frag_sem); + return 0; - Einval: - error = -EINVAL; - goto Done; - Eaccess: - error = -EACCES; - Enomem: - module_put(attr->ca_owner); - Done: - if (error && item) - config_item_put(item); +out_put_module: + module_put(buffer->owner); +out_put_item: + config_item_put(buffer->item); +out_free_buffer: + up_read(&frag->frag_sem); + kfree(buffer); +out: return error; } static int configfs_release(struct inode *inode, struct file *filp) { - struct config_item * item = to_item(filp->f_path.dentry->d_parent); - struct configfs_attribute * attr = to_attr(filp->f_path.dentry); - struct module * owner = attr->ca_owner; - struct configfs_buffer * buffer = filp->private_data; - - if (item) - config_item_put(item); - /* After this point, attr should not be accessed. */ - module_put(owner); - - if (buffer) { - if (buffer->page) - free_page((unsigned long)buffer->page); - mutex_destroy(&buffer->mutex); - kfree(buffer); - } + struct configfs_buffer *buffer = filp->private_data; + + module_put(buffer->owner); + if (buffer->page) + free_page((unsigned long)buffer->page); + mutex_destroy(&buffer->mutex); + kfree(buffer); return 0; } static int configfs_open_file(struct inode *inode, struct file *filp) { - return check_perm(inode, filp, CONFIGFS_ITEM_ATTR); + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR); } static int configfs_open_bin_file(struct inode *inode, struct file *filp) { - return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR); + return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR); } -static int configfs_release_bin_file(struct inode *inode, struct file *filp) +static int configfs_release_bin_file(struct inode *inode, struct file *file) { - struct configfs_buffer *buffer = filp->private_data; - struct dentry *dentry = filp->f_path.dentry; - struct config_item *item = to_item(dentry->d_parent); - struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry); - ssize_t len = 0; - int ret; + struct configfs_buffer *buffer = file->private_data; - buffer->read_in_progress = 0; + buffer->read_in_progress = false; if (buffer->write_in_progress) { - buffer->write_in_progress = 0; - - len = bin_attr->write(item, buffer->bin_buffer, - buffer->bin_buffer_size); - + struct configfs_fragment *frag = to_frag(file); + buffer->write_in_progress = false; + + down_read(&frag->frag_sem); + if (!frag->frag_dead) { + /* result of ->release() is ignored */ + buffer->bin_attr->write(buffer->item, + buffer->bin_buffer, + buffer->bin_buffer_size); + } + up_read(&frag->frag_sem); /* vfree on NULL is safe */ vfree(buffer->bin_buffer); buffer->bin_buffer = NULL; @@ -503,10 +505,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp) buffer->needs_read_fill = 1; } - ret = configfs_release(inode, filp); - if (len < 0) - return len; - return ret; + configfs_release(inode, file); + return 0; } @@ -541,7 +541,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL); error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, - CONFIGFS_ITEM_ATTR); + CONFIGFS_ITEM_ATTR, parent_sd->s_frag); 
inode_unlock(d_inode(dir)); return error; @@ -563,7 +563,7 @@ int configfs_create_bin_file(struct config_item *item, inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL); error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode, - CONFIGFS_ITEM_BIN_ATTR); + CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag); inode_unlock(dir->d_inode); return error; diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index fea6db1ee0657483e3c1ad57a4145ed022ff3b2e..afd79a1a34b3e18ee466cb76faa673f29efab3b2 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -157,11 +157,42 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna !type->ct_item_ops->allow_link) goto out_put; + /* + * This is really sick. What they wanted was a hybrid of + * link(2) and symlink(2) - they wanted the target resolved + * at syscall time (as link(2) would've done), be a directory + * (which link(2) would've refused to do) *AND* be a deep + * fucking magic, making the target busy from rmdir POV. + * symlink(2) is nothing of that sort, and the locking it + * gets matches the normal symlink(2) semantics. Without + * attempts to resolve the target (which might very well + * not even exist yet) done prior to locking the parent + * directory. This perversion, OTOH, needs to resolve + * the target, which would lead to obvious deadlocks if + * attempted with any directories locked. + * + * Unfortunately, that garbage is userland ABI and we should've + * said "no" back in 2005. Too late now, so we get to + * play very ugly games with locking. + * + * Try *ANYTHING* of that sort in new code, and you will + * really regret it. Just ask yourself - what could a BOFH + * do to me and do I want to find it out first-hand? + * + * AV, a thoroughly annoyed bastard. 
+ */ + inode_unlock(dir); ret = get_target(symname, &path, &target_item, dentry->d_sb); + inode_lock(dir); if (ret) goto out_put; - ret = type->ct_item_ops->allow_link(parent_item, target_item); + if (dentry->d_inode || d_unhashed(dentry)) + ret = -EEXIST; + else + ret = inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (!ret) + ret = type->ct_item_ops->allow_link(parent_item, target_item); if (!ret) { mutex_lock(&configfs_symlink_mutex); ret = create_link(parent_item, target_item, dentry); diff --git a/fs/dlm/member.c b/fs/dlm/member.c index 9c47f1c14a8badb59048a88cf33b674e93a679e4..a47ae99f7bcbcf91f54123d62ea3ba4beb864baf 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -683,7 +683,7 @@ int dlm_ls_start(struct dlm_ls *ls) error = dlm_config_nodes(ls->ls_name, &nodes, &count); if (error < 0) - goto fail; + goto fail_rv; spin_lock(&ls->ls_recover_lock); @@ -715,8 +715,9 @@ int dlm_ls_start(struct dlm_ls *ls) return 0; fail: - kfree(rv); kfree(nodes); + fail_rv: + kfree(rv); return error; } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 9ac65914ab5b0766b974e2837e8607118f04526b..57f2aacec97f59df9cf39f7dd5308a8d075f4547 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -700,7 +700,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat, result.version[0] = DLM_DEVICE_VERSION_MAJOR; result.version[1] = DLM_DEVICE_VERSION_MINOR; result.version[2] = DLM_DEVICE_VERSION_PATCH; - memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb)); + memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr)); result.user_lksb = ua->user_lksb; /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 5c5ff9f6fe0798424897101ddd7a1c6b5eb14bb9..2a5e436ff8ddc843c92621ff9b83ad3dfb106bfb 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -326,9 +326,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode) static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, struct dentry *lower_dentry) { - struct inode *inode, *lower_inode = d_inode(lower_dentry); + struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent); + struct inode *inode, *lower_inode; struct ecryptfs_dentry_info *dentry_info; - struct vfsmount *lower_mnt; int rc = 0; dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL); @@ -340,16 +340,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, return ERR_PTR(-ENOMEM); } - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); fsstack_copy_attr_atime(d_inode(dentry->d_parent), - d_inode(lower_dentry->d_parent)); + d_inode(path->dentry)); BUG_ON(!d_count(lower_dentry)); ecryptfs_set_dentry_private(dentry, dentry_info); - dentry_info->lower_path.mnt = lower_mnt; + dentry_info->lower_path.mnt = mntget(path->mnt); dentry_info->lower_path.dentry = lower_dentry; - if (d_really_is_negative(lower_dentry)) { + /* + * negative dentry can go positive under us here - its parent is not + * locked. That's OK and that could happen just as we return from + * ecryptfs_lookup() anyway. Just need to be careful and fetch + * ->d_inode only once - it's not stable here. 
+ */ + lower_inode = READ_ONCE(lower_dentry->d_inode); + + if (!lower_inode) { /* We want to add because we couldn't find in lower */ d_add(dentry, NULL); return NULL; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 2080bee85de4f3ccf14c979ecfab4a67a909c1c8..25b524c993e9071783e16e61ab06070012267c28 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3757,8 +3757,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, * illegal. */ if (ee_block != map->m_lblk || ee_len > map->m_len) { -#ifdef EXT4_DEBUG - ext4_warning("Inode (%ld) finished: extent logical block %llu," +#ifdef CONFIG_EXT4_DEBUG + ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," " len %u; IO logical block %llu, len %u", inode->i_ino, (unsigned long long)ee_block, ee_len, (unsigned long long)map->m_lblk, map->m_len); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5578aa8abfd1a6921fbe4da2ca0aed90376fd3fe..c7b58657bfb17883b63308d0b56757a9aee06ddc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4013,6 +4013,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) trace_ext4_punch_hole(inode, offset, length, 0); + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); + if (ext4_has_inline_data(inode)) { + down_write(&EXT4_I(inode)->i_mmap_sem); + ret = ext4_convert_inline_data(inode); + up_write(&EXT4_I(inode)->i_mmap_sem); + if (ret) + return ret; + } + /* * Write out all dirty pages to avoid race conditions * Then release them. diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ab4f2e9b47a6578949a15bbd232c901b740c11b8..4431e6aae3d9c87b0c11555a03be86abc945bff9 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3431,11 +3431,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) seg_i = CURSEG_I(sbi, i); segno = le32_to_cpu(ckpt->cur_data_segno[i]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); - if (blk_off > ENTRIES_IN_SUM) { - f2fs_bug_on(sbi, 1); - f2fs_put_page(page, 1); - return -EFAULT; - } seg_i->next_segno = segno; reset_curseg(sbi, i, 0); seg_i->alloc_type = ckpt->alloc_type[i]; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c old mode 100644 new mode 100755 index 67aaed9d60810f5dd00a4d04b631deb925565567..a5db4a6bfb3a461e8d8b3cea4ed26ca7bc018a7d --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2663,7 +2663,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) } } for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { - for (j = i; j < NR_CURSEG_DATA_TYPE; j++) { + for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) { if (le32_to_cpu(ckpt->cur_node_segno[i]) == le32_to_cpu(ckpt->cur_data_segno[j])) { f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u", diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 81cecbe6d7cf6b193a416df0c6d4b5585255ec56..971e369517a734a45578213ba37a114c4eb1a6c9 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used, err = -ENOMEM; goto error; } + /* Avoid race with userspace read via bdev */ + lock_buffer(bhs[n]); memset(bhs[n]->b_data, 0, sb->s_blocksize); set_buffer_uptodate(bhs[n]); + unlock_buffer(bhs[n]); mark_buffer_dirty_inode(bhs[n], dir); n++; @@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts) fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); de = (struct msdos_dir_entry *)bhs[0]->b_data; + /* Avoid race with userspace read via bdev */ + lock_buffer(bhs[0]); /* filling the new directory slots ("." and ".." 
entries) */ memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME); memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME); @@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts) de[0].size = de[1].size = 0; memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de)); set_buffer_uptodate(bhs[0]); + unlock_buffer(bhs[0]); mark_buffer_dirty_inode(bhs[0], dir); err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE); @@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots, /* fill the directory entry */ copy = min(size, sb->s_blocksize); + /* Avoid race with userspace read via bdev */ + lock_buffer(bhs[n]); memcpy(bhs[n]->b_data, slots, copy); - slots += copy; - size -= copy; set_buffer_uptodate(bhs[n]); + unlock_buffer(bhs[n]); mark_buffer_dirty_inode(bhs[n], dir); + slots += copy; + size -= copy; if (!size) break; n++; diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 18121c7507279b4db8f933b74614e1775c0891bc..404d6f410339c9e99023ef866d8149cef1c3c7e0 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -390,8 +390,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs, err = -ENOMEM; goto error; } + /* Avoid race with userspace read via bdev */ + lock_buffer(c_bh); memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize); set_buffer_uptodate(c_bh); + unlock_buffer(c_bh); mark_buffer_dirty_inode(c_bh, sbi->fat_inode); if (sb->s_flags & MS_SYNCHRONOUS) err = sync_dirty_buffer(c_bh); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index b088d636ea35d6d5f703947b7c78c47505cb4bc7..2a9e755877d47d25e221346a3e8cf12915b341a1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -582,10 +582,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, spin_unlock(&inode->i_lock); /* - * A dying wb indicates that the memcg-blkcg mapping has changed - * and a new wb is already serving the memcg. Switch immediately. + * A dying wb indicates that either the blkcg associated with the + * memcg changed or the associated memcg is dying. In the first + * case, a replacement wb should already be available and we should + * refresh the wb immediately. In the second case, trying to + * refresh will keep failing. 
*/ - if (unlikely(wb_dying(wbc->wb))) + if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css))) inode_switch_wbs(inode, wbc->wb_id); } diff --git a/fs/fuse/control.c b/fs/fuse/control.c index e25c40c10f4fa8483093c14f7d47260bf244bef5..97ac2f5843fccb6730dceea67b9af0099c4ce75e 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -107,7 +107,7 @@ static ssize_t fuse_conn_max_background_read(struct file *file, if (!fc) return 0; - val = fc->max_background; + val = READ_ONCE(fc->max_background); fuse_conn_put(fc); return fuse_conn_limit_read(file, buf, len, ppos, val); @@ -144,7 +144,7 @@ static ssize_t fuse_conn_congestion_threshold_read(struct file *file, if (!fc) return 0; - val = fc->congestion_threshold; + val = READ_ONCE(fc->congestion_threshold); fuse_conn_put(fc); return fuse_conn_limit_read(file, buf, len, ppos, val); diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index c5b6b71654893b2bb7fe0276a40ac57ddb3350a0..d9aba970072670be069c548a4b3b849f2cc70e52 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -513,6 +513,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file) rc = cuse_send_init(cc); if (rc) { fuse_dev_free(fud); + fuse_conn_put(&cc->fc); return rc; } file->private_data = fud; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e43f1c6e44640c9f19be0c4dcf980d10a6ef2c94..108329e70307f54958f61ef9d7c709876fe02066 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1703,6 +1703,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, if (attr->ia_valid & ATTR_SIZE) is_truncate = true; + /* Flush dirty data/metadata before non-truncate SETATTR */ + if (is_wb && S_ISREG(inode->i_mode) && + attr->ia_valid & + (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET | + ATTR_TIMES_SET)) { + err = write_inode_now(inode, true); + if (err) + return err; + + fuse_set_nowrite(inode); + fuse_release_nowrite(inode); + } + if (is_truncate) { fuse_set_nowrite(inode); set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 5d31bf1a1168326b051aa8718c0048c71f43f135..b1ee8b7a5a9b026cbd134d0e2aa785958c833898 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) { struct fuse_conn *fc = get_fuse_conn(inode); int err; - bool lock_inode = (file->f_flags & O_TRUNC) && + bool is_wb_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc && fc->writeback_cache; @@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (err) return err; - if (lock_inode) + if (is_wb_truncate) { inode_lock(inode); + fuse_set_nowrite(inode); + } err = fuse_do_open(fc, get_node_id(inode), file, isdir); if (!err) fuse_finish_open(inode, file); - if (lock_inode) + if (is_wb_truncate) { + fuse_release_nowrite(inode); inode_unlock(inode); + } return err; } @@ -1725,6 +1729,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc) WARN_ON(wbc->sync_mode == WB_SYNC_ALL); redirty_page_for_writepage(wbc, page); + unlock_page(page); return 0; } diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 0731267072706f5fda5616580162b38cc508969d..0a80f6636549205b09c45064ded02bd339fd51da 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -630,7 +630,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) RB_CLEAR_NODE(&rs->rs_node); if (rs->rs_free) { - struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm); + u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) + + rs->rs_free - 1; + struct gfs2_rbm last_rbm = { .rgd = 
rs->rs_rbm.rgd, }; + struct gfs2_bitmap *start, *last; /* return reserved blocks to the rgrp */ BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free); @@ -641,7 +644,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) it will force the number to be recalculated later. */ rgd->rd_extfail_pt += rs->rs_free; rs->rs_free = 0; - clear_bit(GBF_FULL, &bi->bi_flags); + if (gfs2_rbm_from_block(&last_rbm, last_block)) + return; + start = rbm_bi(&rs->rs_rbm); + last = rbm_bi(&last_rbm); + do + clear_bit(GBF_FULL, &start->bi_flags); + while (start++ != last); } } @@ -1211,7 +1220,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd) rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags); rl_flags &= ~GFS2_RDF_MASK; rgd->rd_flags &= GFS2_RDF_MASK; - rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); + rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK); if (rgd->rd_rgl->rl_unlinked == 0) rgd->rd_flags &= ~GFS2_RDF_CHECK; rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e3ee387a6dfebf14ef244829d317c71ddf47fce1..37496d83661a74585372eb8a7dda355ae892f8b6 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -844,10 +844,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) return error; + flush_workqueue(gfs2_delete_workqueue); kthread_stop(sdp->sd_quotad_process); kthread_stop(sdp->sd_logd_process); - flush_workqueue(gfs2_delete_workqueue); gfs2_quota_sync(sdp->sd_vfs, 0); gfs2_statfs_sync(sdp->sd_vfs, 0); diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c index 2e713673df42f1fc45402b4d6c29b3c9d0643adf..85dab71bee74f0cc2e1a9e13674dfc9babf307a1 100644 --- a/fs/hfs/brec.c +++ b/fs/hfs/brec.c @@ -444,6 +444,7 @@ static int hfs_brec_update_parent(struct hfs_find_data *fd) /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } + new_node = NULL; } if (!rec && node->parent) diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 320f4372f1720a48c161916452adfc524b651817..77eff447d3014eb73fdb8a78a2f3ba07e193675d 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -219,25 +219,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) return node; } -struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) +/* Make sure @tree has enough space for the @rsvd_nodes */ +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) { - struct hfs_bnode *node, *next_node; - struct page **pagep; - u32 nidx, idx; - unsigned off; - u16 off16; - u16 len; - u8 *data, byte, m; - int i; - - while (!tree->free_nodes) { - struct inode *inode = tree->inode; - u32 count; - int res; + struct inode *inode = tree->inode; + u32 count; + int res; + while (tree->free_nodes < rsvd_nodes) { res = hfs_extend_file(inode); if (res) - return ERR_PTR(res); + return res; HFS_I(inode)->phys_size = inode->i_size = (loff_t)HFS_I(inode)->alloc_blocks * HFS_SB(tree->sb)->alloc_blksz; @@ -245,9 +237,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) tree->sb->s_blocksize_bits; inode_set_bytes(inode, inode->i_size); count = inode->i_size >> tree->node_size_shift; - tree->free_nodes = count - tree->node_count; + tree->free_nodes += count - tree->node_count; tree->node_count = count; } + return 0; +} + +struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) +{ + struct hfs_bnode *node, *next_node; + struct page **pagep; + u32 nidx, idx; + unsigned off; + u16 off16; + u16 len; + u8 *data, byte, m; + int i, res; + + res = hfs_bmap_reserve(tree, 1); + if (res) + return ERR_PTR(res); nidx = 0; node = 
hfs_bnode_find(tree, nidx); diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h index f6bd266d70b55d8273fcffc362f347c336237ae1..2715f416b5a80c9a695fe91bc3080a3377d5e8e6 100644 --- a/fs/hfs/btree.h +++ b/fs/hfs/btree.h @@ -81,6 +81,7 @@ struct hfs_find_data { extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp); extern void hfs_btree_close(struct hfs_btree *); extern void hfs_btree_write(struct hfs_btree *); +extern int hfs_bmap_reserve(struct hfs_btree *, int); extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *); extern void hfs_bmap_free(struct hfs_bnode *node); diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c index 8a66405b0f8b5abddd78bc7b39b783649a44b687..d365bf0b8c77d66d9a2b34c23470539863e1edfd 100644 --- a/fs/hfs/catalog.c +++ b/fs/hfs/catalog.c @@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i if (err) return err; + /* + * Fail early and avoid ENOSPC during the btree operations. We may + * have to split the root node at most once. + */ + err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth); + if (err) + goto err2; + hfs_cat_build_key(sb, fd.search_key, cnid, NULL); entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ? HFS_CDR_THD : HFS_CDR_FTH, @@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name, return err; dst_fd = src_fd; + /* + * Fail early and avoid ENOSPC during the btree operations. We may + * have to split the root node at most once. + */ + err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth); + if (err) + goto out; + /* find the old dir entry and read the data */ hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c index e33a0d36a93ebea7183909ff45f523302943b694..cbe4fca96378abd3510b38b2b2ad4a23493bef93 100644 --- a/fs/hfs/extent.c +++ b/fs/hfs/extent.c @@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) { if (res != -ENOENT) return res; + /* Fail early and avoid ENOSPC during the btree operation */ + res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); + if (res) + return res; hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec)); HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); } else { @@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type) return 0; blocks = 0; - for (i = 0; i < 3; extent++, i++) + for (i = 0; i < 3; i++) blocks += be16_to_cpu(extent[i].count); res = hfs_free_extents(sb, extent, blocks, blocks); @@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block, ablock = (u32)block / HFS_SB(sb)->fs_div; if (block >= HFS_I(inode)->fs_blocks) { - if (block > HFS_I(inode)->fs_blocks || !create) + if (!create) + return 0; + if (block > HFS_I(inode)->fs_blocks) return -EIO; if (ablock >= HFS_I(inode)->alloc_blocks) { res = hfs_extend_file(inode); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index f776acf2378a1cd5fdaa71984ba26e78248287c0..de0d6d4c46b689304cb06b44af31a10818db12c8 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -641,6 +641,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) truncate_setsize(inode, attr->ia_size); hfs_file_truncate(inode); + inode->i_atime = inode->i_mtime = inode->i_ctime = + current_time(inode); } setattr_copy(inode, attr); diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c index 
e5b221de7de636e7d7b126f8ce3c7c978e1d56c9..d7455ea702878a86a55b2af3df0e99e0f7551587 100644 --- a/fs/hfsplus/attributes.c +++ b/fs/hfsplus/attributes.c @@ -216,6 +216,11 @@ int hfsplus_create_attr(struct inode *inode, if (err) goto failed_init_create_attr; + /* Fail early and avoid ENOSPC during the btree operation */ + err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1); + if (err) + goto failed_create_attr; + if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); @@ -312,6 +317,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name) if (err) return err; + /* Fail early and avoid ENOSPC during the btree operation */ + err = hfs_bmap_reserve(fd.tree, fd.tree->depth); + if (err) + goto out; + if (name) { err = hfsplus_attr_build_key(sb, fd.search_key, inode->i_ino, name); diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 1002a0c08319b1a01bcf0e5a8737b7f3b7e141da..20ce698251ad1d9529d880ffcaf68e170809f79e 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c @@ -447,6 +447,7 @@ static int hfs_brec_update_parent(struct hfs_find_data *fd) /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } + new_node = NULL; } if (!rec && node->parent) diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 8d2256454efe6b4f9f737d046c24aa90b5c572d8..7e96b4c294f7ae0e062f9ebb0eee6193a81aac3c 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c @@ -341,26 +341,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) return node; } -struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) +/* Make sure @tree has enough space for the @rsvd_nodes */ +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) { - struct hfs_bnode *node, *next_node; - struct page **pagep; - u32 nidx, idx; - unsigned off; - u16 off16; - u16 len; - u8 *data, byte, m; - int i; + struct inode *inode = tree->inode; + struct hfsplus_inode_info *hip = HFSPLUS_I(inode); + u32 count; + int res; - while (!tree->free_nodes) { - struct inode *inode = tree->inode; - struct hfsplus_inode_info *hip = HFSPLUS_I(inode); - u32 count; - int res; + if (rsvd_nodes <= 0) + return 0; + while (tree->free_nodes < rsvd_nodes) { res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree)); if (res) - return ERR_PTR(res); + return res; hip->phys_size = inode->i_size = (loff_t)hip->alloc_blocks << HFSPLUS_SB(tree->sb)->alloc_blksz_shift; @@ -368,9 +363,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift; inode_set_bytes(inode, inode->i_size); count = inode->i_size >> tree->node_size_shift; - tree->free_nodes = count - tree->node_count; + tree->free_nodes += count - tree->node_count; tree->node_count = count; } + return 0; +} + +struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) +{ + struct hfs_bnode *node, *next_node; + struct page **pagep; + u32 nidx, idx; + unsigned off; + u16 off16; + u16 len; + u8 *data, byte, m; + int i, res; + + res = hfs_bmap_reserve(tree, 1); + if (res) + return ERR_PTR(res); nidx = 0; node = hfs_bnode_find(tree, nidx); diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index a5e00f7a4c1434ba9d5718935c213fe0c2158868..947da72e72a303069cf0a396eff37af2650998f5 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -264,6 +264,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, if (err) return err; + /* + * Fail early and avoid ENOSPC during the btree operations. We may + * have to split the root node at most once. 
+ */ + err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth); + if (err) + goto err2; + hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid); entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ? @@ -332,6 +340,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str) if (err) return err; + /* + * Fail early and avoid ENOSPC during the btree operations. We may + * have to split the root node at most once. + */ + err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2); + if (err) + goto out; + if (!str) { int len; @@ -432,6 +448,14 @@ int hfsplus_rename_cat(u32 cnid, return err; dst_fd = src_fd; + /* + * Fail early and avoid ENOSPC during the btree operations. We may + * have to split the root node at most twice. + */ + err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1); + if (err) + goto out; + /* find the old dir entry and read the data */ err = hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index feca524ce2a5c7d6034bf0a528679fe923381507..d93c051559cb880231e8ab165c1ec6176a336aba 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -99,6 +99,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode, if (hip->extent_state & HFSPLUS_EXT_NEW) { if (res != -ENOENT) return res; + /* Fail early and avoid ENOSPC during the btree operation */ + res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); + if (res) + return res; hfs_brec_insert(fd, hip->cached_extents, sizeof(hfsplus_extent_rec)); hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); @@ -232,7 +236,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, ablock = iblock >> sbi->fs_shift; if (iblock >= hip->fs_blocks) { - if (iblock > hip->fs_blocks || !create) + if (!create) + return 0; + if (iblock > hip->fs_blocks) return -EIO; if (ablock >= hip->alloc_blocks) { res = hfsplus_file_extend(inode, false); diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index a3f03b24746376ddfcb8bac254613334ae4747b9..35cd703c66045c4f6c114e314f02ce4ac97f1726 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb) #define hfs_btree_open hfsplus_btree_open #define hfs_btree_close hfsplus_btree_close #define hfs_btree_write hfsplus_btree_write +#define hfs_bmap_reserve hfsplus_bmap_reserve #define hfs_bmap_alloc hfsplus_bmap_alloc #define hfs_bmap_free hfsplus_bmap_free #define hfs_bnode_read hfsplus_bnode_read @@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors, struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id); void hfs_btree_close(struct hfs_btree *tree); int hfs_btree_write(struct hfs_btree *tree); +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes); struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree); void hfs_bmap_free(struct hfs_bnode *node); diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 2e796f8302ffac30315530676585b05f49ab9cb7..cfd380e2743d124daf18850bd97a56574d09689f 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -260,6 +260,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) } truncate_setsize(inode, attr->ia_size); hfsplus_file_truncate(inode); + inode->i_mtime = inode->i_ctime = current_time(inode); } setattr_copy(inode, attr); diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c index 
80317b04c84a23401fe8eae0b5bd5202145c0e7c..e431a850f2f2b8acef9f35a514d3f5b603ae8865 100644 --- a/fs/kernfs/symlink.c +++ b/fs/kernfs/symlink.c @@ -63,6 +63,9 @@ static int kernfs_get_target_path(struct kernfs_node *parent, if (base == kn) break; + if ((s - path) + 3 >= PATH_MAX) + return -ENAMETOOLONG; + strcpy(s, "../"); s += 3; base = base->parent; @@ -79,7 +82,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent, if (len < 2) return -EINVAL; len--; - if ((s - path) + len > PATH_MAX) + if ((s - path) + len >= PATH_MAX) return -ENAMETOOLONG; /* reverse fillup of target string from target to base */ diff --git a/fs/libfs.c b/fs/libfs.c index 9588780ad43e175c795580275f6ddc62be4f670a..9dc0e1ed82288a52c07330f3786bd82f2d71cece 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -85,58 +85,47 @@ int dcache_dir_close(struct inode *inode, struct file *file) EXPORT_SYMBOL(dcache_dir_close); /* parent is locked at least shared */ -static struct dentry *next_positive(struct dentry *parent, - struct list_head *from, - int count) +/* + * Returns an element of siblings' list. + * We are looking for <count>th positive after <p>; if + * found, dentry is grabbed and passed to caller via *<res>. + * If no such element exists, the anchor of list is returned + * and *<res> is set to NULL. + */ +static struct list_head *scan_positives(struct dentry *cursor, + struct list_head *p, + loff_t count, + struct dentry **res) { - unsigned *seq = &parent->d_inode->i_dir_seq, n; - struct dentry *res; - struct list_head *p; - bool skipped; - int i; + struct dentry *dentry = cursor->d_parent, *found = NULL; -retry: - i = count; - skipped = false; - n = smp_load_acquire(seq) & ~1; - res = NULL; - rcu_read_lock(); - for (p = from->next; p != &parent->d_subdirs; p = p->next) { + spin_lock(&dentry->d_lock); + while ((p = p->next) != &dentry->d_subdirs) { struct dentry *d = list_entry(p, struct dentry, d_child); - if (!simple_positive(d)) { - skipped = true; - } else if (!--i) { - res = d; - break; + // we must at least skip cursors, to avoid livelocks + if (d->d_flags & DCACHE_DENTRY_CURSOR) + continue; + if (simple_positive(d) && !--count) { + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(d)) + found = dget_dlock(d); + spin_unlock(&d->d_lock); + if (likely(found)) + break; + count = 1; + } + if (need_resched()) { + list_move(&cursor->d_child, p); + p = &cursor->d_child; + spin_unlock(&dentry->d_lock); + cond_resched(); + spin_lock(&dentry->d_lock); } } - rcu_read_unlock(); - if (skipped) { - smp_rmb(); - if (unlikely(*seq != n)) - goto retry; - } - return res; -} - -static void move_cursor(struct dentry *cursor, struct list_head *after) -{ - struct dentry *parent = cursor->d_parent; - unsigned n, *seq = &parent->d_inode->i_dir_seq; - spin_lock(&parent->d_lock); - for (;;) { - n = *seq; - if (!(n & 1) && cmpxchg(seq, n, n + 1) == n) - break; - cpu_relax(); - } - __list_del(cursor->d_child.prev, cursor->d_child.next); - if (after) - list_add(&cursor->d_child, after); - else - list_add_tail(&cursor->d_child, &parent->d_subdirs); - smp_store_release(seq, n + 2); - spin_unlock(&parent->d_lock); + spin_unlock(&dentry->d_lock); + dput(*res); + *res = found; + return p; } loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) @@ -152,17 +141,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) return -EINVAL; } if (offset != file->f_pos) { + struct dentry *cursor = file->private_data; + struct dentry *to = NULL; + struct list_head *p; + file->f_pos = offset; - if (file->f_pos >= 2) { - struct dentry *cursor = file->private_data; - struct dentry *to; - loff_t n = file->f_pos - 2; - - inode_lock_shared(dentry->d_inode); - to = next_positive(dentry, &dentry->d_subdirs, n); - move_cursor(cursor, to ?
&to->d_child : NULL); - inode_unlock_shared(dentry->d_inode); + inode_lock_shared(dentry->d_inode); + + if (file->f_pos > 2) { + p = scan_positives(cursor, &dentry->d_subdirs, + file->f_pos - 2, &to); + spin_lock(&dentry->d_lock); + list_move(&cursor->d_child, p); + spin_unlock(&dentry->d_lock); + } else { + spin_lock(&dentry->d_lock); + list_del_init(&cursor->d_child); + spin_unlock(&dentry->d_lock); } + + dput(to); + + inode_unlock_shared(dentry->d_inode); } return offset; } @@ -184,25 +184,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; struct dentry *cursor = file->private_data; - struct list_head *p = &cursor->d_child; - struct dentry *next; - bool moved = false; + struct list_head *anchor = &dentry->d_subdirs; + struct dentry *next = NULL; + struct list_head *p; if (!dir_emit_dots(file, ctx)) return 0; if (ctx->pos == 2) - p = &dentry->d_subdirs; - while ((next = next_positive(dentry, p, 1)) != NULL) { + p = anchor; + else + p = &cursor->d_child; + + while ((p = scan_positives(cursor, p, 1, &next)) != anchor) { if (!dir_emit(ctx, next->d_name.name, next->d_name.len, d_inode(next)->i_ino, dt_type(d_inode(next)))) break; - moved = true; - p = &next->d_child; ctx->pos++; } - if (moved) - move_cursor(cursor, p); + spin_lock(&dentry->d_lock); + list_move_tail(&cursor->d_child, p); + spin_unlock(&dentry->d_lock); + dput(next); + return 0; } EXPORT_SYMBOL(dcache_readdir); diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index dff600ae0d7477333a3e41749cffe58ad77c7e3a..9a8830a0f31f953dcd50e2f6b54de32fed7da760 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -52,6 +52,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation, return false; } +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode) +{ + struct nfs_delegation *delegation; + + delegation = rcu_dereference(NFS_I(inode)->delegation); + if (nfs4_is_valid_delegation(delegation, 0)) + return delegation; + return NULL; +} + static int nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) { @@ -91,7 +101,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags) return nfs4_do_check_delegation(inode, flags, false); } -static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) +static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid) { struct inode *inode = state->inode; struct file_lock *fl; @@ -106,7 +116,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_ spin_lock(&flctx->flc_lock); restart: list_for_each_entry(fl, list, fl_list) { - if (nfs_file_open_context(fl->fl_file) != ctx) + if (nfs_file_open_context(fl->fl_file)->state != state) continue; spin_unlock(&flctx->flc_lock); status = nfs4_lock_delegation_recall(fl, state, stateid); @@ -153,7 +163,7 @@ static int nfs_delegation_claim_opens(struct inode *inode, seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); err = nfs4_open_delegation_recall(ctx, state, stateid, type); if (!err) - err = nfs_delegation_claim_locks(ctx, state, stateid); + err = nfs_delegation_claim_locks(state, stateid); if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) err = -EAGAIN; mutex_unlock(&sp->so_delegreturn_mutex); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index e9d55579687393552b0d83c46dfcafc56dbace89..2c6cb7fb7d5ee1c4c288d1486e782b028b15184b 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ 
-62,6 +62,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred); +struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode); void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); int nfs4_have_delegation(struct inode *inode, fmode_t flags); int nfs4_check_delegation(struct inode *inode, fmode_t flags); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 8a0c301b0c69910c77ed7adceeef506e6ed1119a..7138383382ff12c510cfaf9cc5c8882efe95c6da 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (IS_ERR(inode)) { err = PTR_ERR(inode); switch (err) { - case -EPERM: - case -EACCES: - case -EDQUOT: - case -ENOSPC: - case -EROFS: - goto out_put_ctx; default: + goto out_put_ctx; + case -ENOENT: + case -ESTALE: + case -EISDIR: + case -ENOTDIR: + case -ELOOP: goto out_drop; } } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ea29c608be893588941f6c713b420b516b214341..ca4249ae644f293389bcea1bd85f11732440555d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1368,8 +1368,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, return 0; if ((delegation->type & fmode) != fmode) return 0; - if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) - return 0; switch (claim) { case NFS4_OPEN_CLAIM_NULL: case NFS4_OPEN_CLAIM_FH: @@ -1628,7 +1626,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) { struct nfs4_state *state = opendata->state; - struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; int open_mode = opendata->o_arg.open_flags; fmode_t fmode = opendata->o_arg.fmode; @@ -1645,7 +1642,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) } spin_unlock(&state->owner->so_lock); rcu_read_lock(); - delegation = rcu_dereference(nfsi->delegation); + delegation = nfs4_get_valid_delegation(state->inode); if (!can_open_delegated(delegation, fmode, claim)) { rcu_read_unlock(); break; @@ -2142,7 +2139,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) goto out_no_action; rcu_read_lock(); - delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); + delegation = nfs4_get_valid_delegation(data->state->inode); if (can_open_delegated(delegation, data->o_arg.fmode, claim)) goto unlock_no_action; rcu_read_unlock(); @@ -5544,6 +5541,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, } status = task->tk_status; if (setclientid.sc_cred) { + kfree(clp->cl_acceptor); clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); put_rpccred(setclientid.sc_cred); } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 5e2724a928ed3e19092d7878bb260393c983f894..d7f8d5ce30e3ead524655edde7b7e92f2fd49e5a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1123,7 +1123,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, } else *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME); } - if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) { + if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) { *p++ = 
cpu_to_be32(label->lfs); *p++ = cpu_to_be32(label->pi); *p++ = cpu_to_be32(label->len); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index fad4d5188aafa3a4c424b040060391504da7e6d1..b6e25126a0b0f3b44329bf002067a81aeb2cf596 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -562,7 +562,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, } hdr->res.fattr = &hdr->fattr; - hdr->res.count = count; + hdr->res.count = 0; hdr->res.eof = 0; hdr->res.verf = &hdr->verf; nfs_fattr_init(&hdr->fattr); diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index b7bca83039895b5692163d2a119f29c126ea5d83..06e72229be1232024aeea5a001d4495781b1bcaa 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -588,7 +588,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) /* Emulate the eof flag, which isn't normally needed in NFSv2 * as it is guaranteed to always return the file attributes */ - if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) + if ((hdr->res.count == 0 && hdr->args.count > 0) || + hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) hdr->res.eof = 1; } return 0; @@ -609,8 +610,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { - if (task->tk_status >= 0) + if (task->tk_status >= 0) { + hdr->res.count = hdr->args.count; nfs_writeback_update_inode(hdr); + } return 0; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c26d046adaaace43d9ffdfeae82a404819f0ec1f..6ad76397b31de03c7c7a5e3a7d90acb9a58fade2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2046,7 +2046,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping, inode->i_mtime = inode->i_ctime = current_time(inode); di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); - ocfs2_update_inode_fsync_trans(handle, inode, 1); + if (handle) + ocfs2_update_inode_fsync_trans(handle, inode, 1); } if (handle) ocfs2_journal_dirty(handle, wc->w_di_bh); @@ -2143,13 +2144,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, struct ocfs2_dio_write_ctxt *dwc = NULL; struct buffer_head *di_bh = NULL; u64 p_blkno; - loff_t pos = iblock << inode->i_sb->s_blocksize_bits; + unsigned int i_blkbits = inode->i_sb->s_blocksize_bits; + loff_t pos = iblock << i_blkbits; + sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits; unsigned len, total_len = bh_result->b_size; int ret = 0, first_get_block = 0; len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); len = min(total_len, len); + /* + * bh_result->b_size is counted in get_more_blocks according to write + * "pos" and "end", so we need to map twice to return different buffer states: + * 1. area inside file size, do not set NEW; + * 2. area outside file size, set NEW. + * + * iblock endblk + * |--------|---------|---------|--------- + * |<-------area in file------->| + */ + + if ((iblock <= endblk) && + ((iblock + ((len - 1) >> i_blkbits)) > endblk)) + len = (endblk - iblock + 1) << i_blkbits; + mlog(0, "get block of %lu at %llu:%u req %u\n", inode->i_ino, pos, len, total_len); @@ -2233,6 +2251,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, if (desc->c_needs_zero) set_buffer_new(bh_result); + if (iblock > endblk) + set_buffer_new(bh_result); + /* May sleep in end_io. It should not happen in an irq context. So defer * it to dio work queue.
*/ set_buffer_defer_completion(bh_result); diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 935bac253991b00c920bb82bfa28486c4980bc12..1403c88f2b053fd521598360007b090f805478c0 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -98,25 +98,34 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, return ret; } +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it + * will be easier to handle read failure. + */ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, unsigned int nr, struct buffer_head *bhs[]) { int status = 0; unsigned int i; struct buffer_head *bh; + int new_bh = 0; trace_ocfs2_read_blocks_sync((unsigned long long)block, nr); if (!nr) goto bail; + /* Don't put buffer head and re-assign it to NULL if it is allocated + * outside since the caller can't be aware of this alteration! + */ + new_bh = (bhs[0] == NULL); + for (i = 0 ; i < nr ; i++) { if (bhs[i] == NULL) { bhs[i] = sb_getblk(osb->sb, block++); if (bhs[i] == NULL) { status = -ENOMEM; mlog_errno(status); - goto bail; + break; } } bh = bhs[i]; @@ -156,9 +165,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, submit_bh(REQ_OP_READ, 0, bh); } +read_failure: for (i = nr; i > 0; i--) { bh = bhs[i - 1]; + if (unlikely(status)) { + if (new_bh && bh) { + /* If middle bh fails, let previous bh + * finish its read and then put it to + * avoid bh leak + */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + put_bh(bh); + bhs[i - 1] = NULL; + } else if (bh && buffer_uptodate(bh)) { + clear_buffer_uptodate(bh); + } + continue; + } + /* No need to wait on the buffer if it's managed by JBD. */ if (!buffer_jbd(bh)) wait_on_buffer(bh); @@ -168,8 +194,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, * so we can safely record this and loop back * to cleanup the other buffers. */ status = -EIO; - put_bh(bh); - bhs[i - 1] = NULL; + goto read_failure; } } @@ -177,6 +202,9 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, return status; } +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it + * will be easier to handle read failure. + */ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, @@ -186,6 +214,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, int i, ignore_cache = 0; struct buffer_head *bh; struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + int new_bh = 0; trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags); @@ -211,6 +240,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, goto bail; } + /* Don't put buffer head and re-assign it to NULL if it is allocated + * outside since the caller can't be aware of this alteration! + */ + new_bh = (bhs[0] == NULL); + ocfs2_metadata_cache_io_lock(ci); for (i = 0 ; i < nr ; i++) { if (bhs[i] == NULL) { @@ -219,7 +253,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, ocfs2_metadata_cache_io_unlock(ci); status = -ENOMEM; mlog_errno(status); - goto bail; + /* Don't forget to put previous bh!
*/ + break; } } bh = bhs[i]; @@ -313,16 +348,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } } - status = 0; - +read_failure: for (i = (nr - 1); i >= 0; i--) { bh = bhs[i]; if (!(flags & OCFS2_BH_READAHEAD)) { - if (status) { - /* Clear the rest of the buffers on error */ - put_bh(bh); - bhs[i] = NULL; + if (unlikely(status)) { + /* Clear the buffers on error including those + * that were read successfully + */ + if (new_bh && bh) { + /* If middle bh fails, let previous bh + * finish its read and then put it to + * avoid bh leak + */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + put_bh(bh); + bhs[i] = NULL; + } else if (bh && buffer_uptodate(bh)) { + clear_buffer_uptodate(bh); + } continue; } /* We know this can't have changed as we hold the @@ -340,9 +386,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, * uptodate. */ status = -EIO; clear_buffer_needs_validate(bh); - put_bh(bh); - bhs[i] = NULL; - continue; + goto read_failure; } if (buffer_needs_validate(bh)) { @@ -352,11 +396,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, BUG_ON(buffer_jbd(bh)); clear_buffer_needs_validate(bh); status = validate(sb, bh); - if (status) { - put_bh(bh); - bhs[i] = NULL; - continue; - } + if (status) + goto read_failure; } } diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index e7b760deefaee65402416e187ad4e2b40fc13e62..32d60f69db24c84f8d816fd52e88e342bbddd7f0 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle) { char *buf; - buf = (char *) get_zeroed_page(GFP_NOFS); + buf = (char *) get_zeroed_page(GFP_ATOMIC); if (buf) { dump_mle(mle, buf, PAGE_SIZE - 1); free_page((unsigned long)buf); } diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 1082b2c3014bed10ecafb04e2b17b1585acb0275..5f2a120240e511a205856684d6e951fce633c922 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, enum dlm_status status; int actions = 0; int in_use; - u8 owner; + u8 owner; + int recovery_wait = 0; mlog(0, "master_node = %d, valblk = %d\n", master_node, flags & LKM_VALBLK); @@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, } if (flags & LKM_CANCEL) lock->cancel_pending = 0; - else - lock->unlock_pending = 0; - + else { + if (!lock->unlock_pending) + recovery_wait = 1; + else + lock->unlock_pending = 0; + } } /* get an extra ref on lock. if we are just switching @@ -244,6 +248,17 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, spin_unlock(&res->spinlock); wake_up(&res->wq); + if (recovery_wait) { + spin_lock(&res->spinlock); + /* Unlock request will directly succeed after owner dies, + * and the lock is already removed from grant list. We have to + * wait for RECOVERING to finish or we miss the chance to purge it, + * since the removal is much faster than the RECOVERING process.
+ */ + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING); + spin_unlock(&res->spinlock); + } + /* let the caller's final dlm_lock_put handle the actual kfree */ if (actions & DLM_UNLOCK_FREE_LOCK) { /* this should always be coupled with list removal */ diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 5729d55da67d037659235092be5e882233333160..2c3e975126b357648c6bca525c3bf8dee2f88703 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3421,7 +3421,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, * we can recover correctly from node failure. Otherwise, we may get * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. */ - if (!ocfs2_is_o2cb_active() && + if (ocfs2_userspace_stack(osb) && lockres->l_ops->flags & LOCK_TYPE_USES_LVB) lvb = 1; diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 4506ec5ec2ea624235a36c06cb755ef2211e0914..bfc44644301cac0acf1a365d24e7efc8c932b8d4 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -289,7 +289,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, if (inode_alloc) inode_lock(inode_alloc); - if (o2info_coherent(&fi->ifi_req)) { + if (inode_alloc && o2info_coherent(&fi->ifi_req)) { status = ocfs2_inode_lock(inode_alloc, &bh, 0); if (status < 0) { mlog_errno(status); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index fa947d36ae1d3eb37bbd92bf30f2b531159257d0..a30f63623db702aded5c96c4c130c858e193a218 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb) /* At this point, we know that no more recovery threads can be * launched, so wait for any recovery completion work to * complete. */ - flush_workqueue(osb->ocfs2_wq); + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); /* * Now that recovery is shut down, and the osb is about to be diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 5d53d0d63d199838b3fb5109e7e4eaa73fc6327e..ea38677daa06b87c64474bb3946d6c42d4388419 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) struct ocfs2_dinode *alloc = NULL; cancel_delayed_work(&osb->la_enable_wq); - flush_workqueue(osb->ocfs2_wq); + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); if (osb->local_alloc_state == OCFS2_LA_UNUSED) goto out; diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index c179afd0051a0284184e7202dee4a575d6b36350..afaa044f5f6bd3531a2b31a934ed6ff214c9ed17 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -25,6 +25,7 @@ #include "ocfs2_ioctl.h" #include "alloc.h" +#include "localalloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" @@ -222,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, struct ocfs2_refcount_tree *ref_tree = NULL; u32 new_phys_cpos, new_len; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + int need_free = 0; if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) { @@ -315,6 +317,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, if (!partial) { context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; ret = -ENOSPC; + need_free = 1; goto out_commit; } } @@ -339,6 +342,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, mlog_errno(ret); out_commit: + if (need_free && context->data_ac) { + struct ocfs2_alloc_context *data_ac = context->data_ac; + + if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL) + 
ocfs2_free_local_alloc_bits(osb, handle, data_ac, + new_phys_cpos, new_len); + else + ocfs2_free_clusters(handle, + data_ac->ac_inode, + data_ac->ac_bh, + ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos), + new_len); + } + ocfs2_commit_trans(osb, handle); out_unlock_mutex: diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 820359096c7aa8df5b76e2a0c1404ce7f9d0337d..52c07346bea3f8960399184c0e6aead7545a25f2 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; */ static struct ocfs2_stack_plugin *active_stack; -inline int ocfs2_is_o2cb_active(void) -{ - return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); -} -EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); - static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) { struct ocfs2_stack_plugin *p; diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index e3036e1790e86da7b4e13dcb0b8c88e3f19b6d50..f2dce10fae543c254dcb4e6628d357b60a3ac16c 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); -/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ -int ocfs2_is_o2cb_active(void); - extern struct kset *ocfs2_kset; #endif /* STACKGLUE_H */ diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c index a799546a67f779ef92f30f3bf959c3fd177b9292..f6172c3f83ba00aa006823e1c07d17c9517dcd14 100644 --- a/fs/orangefs/orangefs-sysfs.c +++ b/fs/orangefs/orangefs-sysfs.c @@ -315,7 +315,7 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj, /* Can't do a service_operation if the client is not running... 
*/ rc = is_daemon_in_service(); if (rc) { - pr_info("%s: Client not running :%d:\n", + pr_info_ratelimited("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 57764da62332b38262c0b219d048c2fa7b32175b..7c52993b9a3b364520e0ed9126ac23857d86d2d9 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -482,7 +482,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, struct dentry *hardlink) { int err; - const struct cred *old_cred; + const struct cred *old_cred, *hold_cred = NULL; struct cred *override_cred; err = ovl_copy_up(dentry->d_parent); @@ -505,7 +505,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, goto out_revert_creds; } } - put_cred(override_creds(override_cred)); + hold_cred = override_creds(override_cred); put_cred(override_cred); if (!ovl_dentry_is_opaque(dentry)) @@ -516,7 +516,9 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, link, hardlink); } out_revert_creds: - ovl_revert_creds(old_cred); + ovl_revert_creds(old_cred ?: hold_cred); + if (old_cred && hold_cred) + put_cred(hold_cred); if (!err) { struct inode *realinode = d_inode(ovl_dentry_upper(dentry)); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 12c8e5e64db509bf28927529881884c4da25cfe3..5b5153bf0d2eaeee8f1479b1e756d66cb052ed8b 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -234,7 +234,8 @@ static bool ovl_can_list(const char *s) return true; /* Never list trusted.overlay, list other trusted for superuser only */ - return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); + return !ovl_is_private_xattr(s) && + ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 8ab782d8b33ddc7f4ac9e2fec72230ca0d4a060b..93d13f4010c147167ce4a3cb8ab6bf731041637e 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -164,6 +164,16 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, pfn, size, prot); } +/* + * Architectures which support memory encryption override this. + */ +ssize_t __weak +copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, + unsigned long offset, int userbuf) +{ + return copy_oldmem_page(pfn, buf, csize, offset, userbuf); +} + /* * Copy to either kernel or user space */ diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 8ad65d43b65d892da966b678c4e2f1e653e2022f..d34085bf4a40be82acc56017e210b3e946e1a56d 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -4212,15 +4212,28 @@ xfs_bmapi_read( XFS_STATS_INC(mp, xs_blk_mapr); ifp = XFS_IFORK_PTR(ip, whichfork); + if (!ifp) { + /* No CoW fork? Return a hole. */ + if (whichfork == XFS_COW_FORK) { + mval->br_startoff = bno; + mval->br_startblock = HOLESTARTBLOCK; + mval->br_blockcount = len; + mval->br_state = XFS_EXT_NORM; + *nmap = 1; + return 0; + } - /* No CoW fork? Return a hole. */ - if (whichfork == XFS_COW_FORK && !ifp) { - mval->br_startoff = bno; - mval->br_startblock = HOLESTARTBLOCK; - mval->br_blockcount = len; - mval->br_state = XFS_EXT_NORM; - *nmap = 1; - return 0; + /* + * A missing attr ifork implies that the inode says we're in + * extents or btree format but failed to pass the inode fork + * verifier while trying to load it. Treat that as a file + * corruption too. 
+ */ +#ifdef DEBUG + xfs_alert(mp, "%s: inode %llu missing fork %d", + __func__, ip->i_ino, whichfork); +#endif /* DEBUG */ + return -EFSCORRUPTED; } if (!(ifp->if_flags & XFS_IFEXTENTS)) { diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 7a04b039039fdb6df7ef49f9358f412178fa8d85..b66c99329b7acfdc565b960ed10627baa138538f 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -57,6 +57,32 @@ static kmem_zone_t *xfs_buf_zone; #define xb_to_gfp(flags) \ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) +/* + * Locking orders + * + * xfs_buf_ioacct_inc: + * xfs_buf_ioacct_dec: + * b_sema (caller holds) + * b_lock + * + * xfs_buf_stale: + * b_sema (caller holds) + * b_lock + * lru_lock + * + * xfs_buf_rele: + * b_lock + * pag_buf_lock + * lru_lock + * + * xfs_buftarg_wait_rele + * lru_lock + * b_lock (trylock due to inversion) + * + * xfs_buftarg_isolate + * lru_lock + * b_lock (trylock due to inversion) + */ static inline int xfs_buf_is_vmapped( @@ -957,8 +983,18 @@ xfs_buf_rele( ASSERT(atomic_read(&bp->b_hold) > 0); - release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); + /* + * We grab the b_lock here first to serialise racing xfs_buf_rele() + * calls. The pag_buf_lock being taken on the last reference only + * serialises against racing lookups in xfs_buf_find(). IOWs, the second + * to last reference we drop here is not serialised against the last + * reference until we take bp->b_lock. Hence if we don't grab b_lock + * first, the last "release" reference can win the race to the lock and + * free the buffer before the second-to-last reference is processed, + * leading to a use-after-free scenario. + */ spin_lock(&bp->b_lock); + release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); if (!release) { /* * Drop the in-flight state if the buffer is already on the LRU @@ -1674,7 +1710,7 @@ xfs_buftarg_isolate( * zero. If the value is already zero, we need to reclaim the * buffer, otherwise it gets another trip through the LRU. 
*/ - if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { + if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { spin_unlock(&bp->b_lock); return LRU_ROTATE; } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 67d589e0a49f04fd58ec9e001c65ad423dc2d66f..b16ca13c11d5481800714b82c0fcb87021250860 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1674,6 +1674,7 @@ xfs_fs_fill_super( out_close_devices: xfs_close_devices(mp); out_free_fsname: + sb->s_fs_info = NULL; xfs_free_fsname(mp); kfree(mp); out: @@ -1691,6 +1692,10 @@ xfs_fs_put_super( { struct xfs_mount *mp = XFS_M(sb); + /* if ->fill_super failed, we have no mount to tear down */ + if (!sb->s_fs_info) + return; + xfs_notice(mp, "Unmounting Filesystem"); xfs_filestream_unmount(mp); xfs_unmountfs(mp); @@ -1700,6 +1705,8 @@ xfs_fs_put_super( xfs_destroy_percpu_counters(mp); xfs_destroy_mount_workqueues(mp); xfs_close_devices(mp); + + sb->s_fs_info = NULL; xfs_free_fsname(mp); kfree(mp); } @@ -1719,6 +1726,9 @@ xfs_fs_nr_cached_objects( struct super_block *sb, struct shrink_control *sc) { + /* Paranoia: catch incorrect calls during mount setup or teardown */ + if (WARN_ON_ONCE(!sb->s_fs_info)) + return 0; return xfs_reclaim_inodes_count(XFS_M(sb)); } diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 1daeacbab1e7de510815c20575f657f406b95437..0c51f91bc3b3bd21e7d05a3d497adb2f626999df 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -794,6 +794,25 @@ struct drm_dp_aux_msg { size_t size; }; +struct cec_adapter; +struct edid; + +/** + * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX + * @lock: mutex protecting this struct + * @adap: the CEC adapter for CEC-Tunneling-over-AUX support. + * @name: name of the CEC adapter + * @parent: parent device of the CEC adapter + * @unregister_work: unregister the CEC adapter + */ +struct drm_dp_aux_cec { + struct mutex lock; + struct cec_adapter *adap; + const char *name; + struct device *parent; + struct delayed_work unregister_work; +}; + /** * struct drm_dp_aux - DisplayPort AUX channel * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter @@ -846,6 +865,10 @@ struct drm_dp_aux { * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. */ unsigned i2c_defer_count; + /** + * @cec: struct containing fields used for CEC-Tunneling-over-AUX. 
+ */ + struct drm_dp_aux_cec cec; }; ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, @@ -914,4 +937,37 @@ void drm_dp_aux_init(struct drm_dp_aux *aux); int drm_dp_aux_register(struct drm_dp_aux *aux); void drm_dp_aux_unregister(struct drm_dp_aux *aux); +#ifdef CONFIG_DRM_DP_CEC +void drm_dp_cec_irq(struct drm_dp_aux *aux); +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, + struct device *parent); +void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); +void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); +#else +static inline void drm_dp_cec_irq(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + const char *name, + struct device *parent) +{ +} + +static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) +{ +} + +static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, + const struct edid *edid) +{ +} + +static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux) +{ +} + +#endif + #endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index 9c03895dc479dae56b84b8dd2f912bd499709eef..72bf408f887fc9de8e597bad49e766f45b94cd0f 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -42,6 +42,7 @@ struct drm_vma_offset_node { rwlock_t vm_lock; struct drm_mm_node vm_node; struct rb_root vm_files; + bool readonly:1; }; struct drm_vma_offset_manager { diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3b77588a93602eecd5d73d30d6fd77377286f07e..dec03c0dbc214f216b57a7081e65b197855d61bb 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -185,8 +185,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf, #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +/* + * The static inlines below do not handle constant nbits==0 correctly, + * so make such users (should any ever turn up) call the out-of-line + * versions. 
+ */ #define small_const_nbits(nbits) \ - (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { @@ -316,7 +321,7 @@ static __always_inline int bitmap_weight(const unsigned long *src, unsigned int } static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, int nbits) + unsigned int shift, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 9a6402a56787b3535ae15591c44197e671ee9161..480cd5afa01ee5e036f958dc0d82092602916e4d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -126,9 +126,10 @@ struct bio { #define BIO_BOUNCED 3 /* bio is a bounce bio */ #define BIO_USER_MAPPED 4 /* contains user pages */ #define BIO_NULL_MAPPED 5 /* contains invalid user pages */ -#define BIO_QUIET 6 /* Make BIO Quiet */ -#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ -#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ +#define BIO_WORKINGSET 6 /* contains userspace workingset pages */ +#define BIO_QUIET 7 /* Make BIO Quiet */ +#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */ +#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */ /* * Flags starting here get preserved by bio_reset() - this includes diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9f91d4806c12422c191c0411e5b815fa09aba8de..8647e7fb6261f3a00b721e8e200d1aa933b9243a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -216,6 +216,11 @@ struct request { (req)->cmd_flags |= flags; \ } while (0) +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return rq->cmd_type != REQ_TYPE_FS; +} + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -672,7 +677,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync) static inline bool rq_mergeable(struct request *rq) { - if (rq->cmd_type != REQ_TYPE_FS) + if (blk_rq_is_passthrough(rq)) return false; if (req_op(rq) == REQ_OP_FLUSH) @@ -925,7 +930,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, { struct request_queue *q = rq->q; - if (unlikely(rq->cmd_type != REQ_TYPE_FS)) + if (blk_rq_is_passthrough(rq)) return q->limits.max_hw_sectors; if (!q->limits.chunk_sectors || diff --git a/include/linux/bug.h b/include/linux/bug.h index a8837155309c96f08e680487b1c38ef7bab95347..34f28aba8748eec0afcd1ac34e511647f2d64e7a 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -114,6 +114,11 @@ int is_valid_bugaddr(unsigned long addr); #else /* !CONFIG_GENERIC_BUG */ +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b2547d96e24c1b3b3ddb8387bfadfb0cd8852d08..e5e2777dab8ccfd1c91c659c4991b7bcd8701033 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -56,6 +56,11 @@ extern ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf); +extern ssize_t cpu_show_itlb_multihit(struct device *dev, + struct 
device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, @@ -284,29 +289,8 @@ static inline int cpuhp_smt_enable(void) { return 0; } static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } #endif -/* - * These are used for a global "mitigations=" cmdline option for toggling - * optional CPU mitigations. - */ -enum cpu_mitigations { - CPU_MITIGATIONS_OFF, - CPU_MITIGATIONS_AUTO, - CPU_MITIGATIONS_AUTO_NOSMT, -}; - -extern enum cpu_mitigations cpu_mitigations; - -/* mitigations=off */ -static inline bool cpu_mitigations_off(void) -{ - return cpu_mitigations == CPU_MITIGATIONS_OFF; -} - -/* mitigations=auto,nosmt */ -static inline bool cpu_mitigations_auto_nosmt(void) -{ - return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; -} +extern bool cpu_mitigations_off(void); +extern bool cpu_mitigations_auto_nosmt(void); #define IDLE_START 1 #define IDLE_END 2 diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h index 986c06c88d81426b2d0629eb85994ba1f26616d8..84d3c81b59781563f57f66e87c685e2ad7ef191e 100644 --- a/include/linux/cpufeature.h +++ b/include/linux/cpufeature.h @@ -45,7 +45,7 @@ * 'asm/cpufeature.h' of your favorite architecture. */ #define module_cpu_feature_match(x, __initfunc) \ -static struct cpu_feature const cpu_feature_match_ ## x[] = \ +static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ { { .feature = cpu_feature(x) }, { } }; \ MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ \ diff --git a/include/linux/edac.h b/include/linux/edac.h index 9e0d78966552c102011ca84fb8e84a9f158dbf80..c6233227720c75c4224260ebd9bdea224b1c6123 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -17,6 +17,7 @@ #include #include #include +#include struct device; @@ -778,6 +779,6 @@ struct mem_ctl_info { /* * Maximum number of memory controllers in the coherent fabric. */ -#define EDAC_MAX_MCS 16 +#define EDAC_MAX_MCS 2 * MAX_NUMNODES #endif diff --git a/include/linux/fb.h b/include/linux/fb.h index 2b9ece87a4e87fe2fed7fde16bf1f2bfb7262029..04f39f27814c5a01af62fca65c70f6c1e79655f9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -741,8 +741,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); extern const unsigned char *fb_firmware_edid(struct device *device); extern void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs); -extern void fb_edid_add_monspecs(unsigned char *edid, - struct fb_monspecs *specs); extern void fb_destroy_modedb(struct fb_videomode *modedb); extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); @@ -816,7 +814,6 @@ struct dmt_videomode { extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; -extern const struct fb_videomode cea_modes[65]; extern const struct dmt_videomode dmt_modes[]; struct fb_modelist { diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 51649c97fb818ffccda473af59e3f4221e1e6efe..a2df87615e71945bf0191b09776a3f5131eeb180 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -291,6 +291,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } +/** + * gfpflags_normal_context - is gfp_flags a normal sleepable context? + * @gfp_flags: gfp_flags to test + * + * Test whether @gfp_flags indicates that the allocation is from the + * %current context and allowed to sleep. 
+ * + * An allocation being allowed to block doesn't mean it owns the %current + * context. When direct reclaim path tries to allocate memory, the + * allocation context is nested inside whatever %current was doing at the + * time of the original allocation. The nested allocation may be allowed + * to block but modifying anything %current owns can corrupt the outer + * context's expectations. + * + * %true result from this function indicates that the allocation context + * can sleep and use anything that's associated with %current. + */ +static inline bool gfpflags_normal_context(const gfp_t gfp_flags) +{ + return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == + __GFP_DIRECT_RECLAIM; +} + #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else diff --git a/include/linux/hid.h b/include/linux/hid.h index fab65b61d6d4e582f63d7371ff0710d15da404eb..607acde65db3f9bd43b74642f2d25f25b90411e0 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -345,6 +345,7 @@ struct hid_item { #define HID_GROUP_RMI 0x0100 #define HID_GROUP_WACOM 0x0101 #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 +#define HID_GROUP_STEAM 0x0103 /* * This is the global environment of the parser. This information is diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index aa2b4e42af9430b61ac0a4e82b6d95d13e041e02..727cfc1301529dbf0ad81f4bfca0c8945ffbeaa9 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2651,4 +2651,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) return true; } +struct element { + u8 id; + u8 datalen; + u8 data[]; +} __packed; + +/* element iteration helpers */ +#define for_each_element(_elem, _data, _datalen) \ + for (_elem = (const struct element *)(_data); \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) && \ + (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ + (int)sizeof(*_elem) + _elem->datalen; \ + _elem = (const struct element *)(_elem->data + _elem->datalen)) + +#define for_each_element_id(element, _id, data, datalen) \ + for_each_element(element, data, datalen) \ + if (element->id == (_id)) + +#define for_each_element_extid(element, extid, data, datalen) \ + for_each_element(element, data, datalen) \ + if (element->id == WLAN_EID_EXTENSION && \ + element->datalen > 0 && \ + element->data[0] == (extid)) + +#define for_each_subelement(sub, element) \ + for_each_element(sub, (element)->data, (element)->datalen) + +#define for_each_subelement_id(sub, id, element) \ + for_each_element_id(sub, id, (element)->data, (element)->datalen) + +#define for_each_subelement_extid(sub, extid, element) \ + for_each_element_extid(sub, extid, (element)->data, (element)->datalen) + +/** + * for_each_element_completed - determine if element parsing consumed all data + * @element: element pointer after for_each_element() or friends + * @data: same data pointer as passed to for_each_element() or friends + * @datalen: same data length as passed to for_each_element() or friends + * + * This function returns %true if all the data was parsed or considered + * while walking the elements. Only use this if your for_each_element() + * loop cannot be broken out of, otherwise it always returns %false. + * + * If some data was malformed, this returns %false since the last parsed + * element will not fill the whole remaining data. 
+ */ +static inline bool for_each_element_completed(const struct element *element, + const void *data, size_t datalen) +{ + return (const u8 *)element == (const u8 *)data + datalen; +} + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index e353f6600b0b25e045fcf82703d28c47d0925ad2..27dbab59f034c276c913094394418283742dec59 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -295,7 +295,8 @@ enum { #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 @@ -320,7 +321,8 @@ enum { #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) -#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index eb55374b73f30302c9dcd868fb70372ab94ecf2b..ab90a8541aaaf8ff5f905edcfe6da1abafb74257 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -129,7 +129,7 @@ static inline bool is_error_page(struct page *page) extern struct kmem_cache *kvm_vcpu_cache; -extern spinlock_t kvm_lock; +extern struct mutex kvm_lock; extern struct list_head vm_list; struct kvm_io_range { @@ -843,6 +843,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); bool kvm_is_reserved_pfn(kvm_pfn_t pfn); +bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; @@ -1208,4 +1209,10 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ +typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); + +int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, + uintptr_t data, const char *name, + struct task_struct **thread_ptr); + #endif diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index b01d2944ebdd275735d7f496ba4c517b32d2a4a2..8850e243c9406f6c2b622faea3ab82286d886708 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -1,7 +1,7 @@ #ifndef _LIBFDT_ENV_H #define _LIBFDT_ENV_H -#include +#include /* For INT_MAX */ #include #include diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 134a2f69c21abf7921181af0adff033bb459edc5..9469eef300952cb0956a2d9f6a6dbe2816d5fe7b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -272,6 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); +extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource, bool online); extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, diff 
--git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h index cf815577bd6869d004f2e4d74a465500df82c4d9..3ae1fe743bc34b010431b54fdaad009dfba8ebc0 100644 --- a/include/linux/mfd/max8997.h +++ b/include/linux/mfd/max8997.h @@ -178,7 +178,6 @@ struct max8997_led_platform_data { struct max8997_platform_data { /* IRQ */ int ono; - int wakeup; /* ---- PMIC ---- */ struct max8997_regulator_data *regulators; diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h index 638222e43e489082dc83e167e1c976ed135fc5db..93011c61aafd21a3150eb779d09c791ae3d31a15 100644 --- a/include/linux/mfd/mc13xxx.h +++ b/include/linux/mfd/mc13xxx.h @@ -247,6 +247,7 @@ struct mc13xxx_platform_data { #define MC13XXX_ADC0_TSMOD0 (1 << 12) #define MC13XXX_ADC0_TSMOD1 (1 << 13) #define MC13XXX_ADC0_TSMOD2 (1 << 14) +#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15) #define MC13XXX_ADC0_ADINC1 (1 << 16) #define MC13XXX_ADC0_ADINC2 (1 << 17) diff --git a/include/linux/mm.h b/include/linux/mm.h index 40e6c2fa7350cedd1a4bf23ea5d6cb20cea20099..cbb1c054909f93b587a8ef44183bd453dbaf86af 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -523,11 +523,6 @@ static inline int is_vmalloc_or_module_addr(const void *x) extern void kvfree(const void *addr); -static inline atomic_t *compound_mapcount_ptr(struct page *page) -{ - return &page[1].compound_mapcount; -} - static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 91db63260ca68e5f61c152dfcc6e1f30a0c1cdc5..416a30c04fa4270917f6216dd0dce17d1b0a6203 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -262,6 +262,11 @@ struct page_frag_cache { typedef unsigned long vm_flags_t; +static inline atomic_t *compound_mapcount_ptr(struct page *page) +{ + return &page[1].compound_mapcount; +} + /* * A region containing a mapping of a non-memory backed file under NOMMU * conditions. These are held in a global tree and are pinned by the VMAs that diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4077d60c0e26f9298d7a1601ebdd0b79d6131a48..9c3c8aad4063569373ce452bf7af6ea69443dccc 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -556,12 +556,28 @@ static inline int PageTransCompound(struct page *page) * * Unlike PageTransCompound, this is safe to be called only while * split_huge_pmd() cannot run from under us, like if protected by the - * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * MMU notifier, otherwise it may result in page->_mapcount check false * positives. + * + * We have to treat page cache THP differently since every subpage of it + * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE + * mapped in the current process so comparing subpage's _mapcount to + * compound_mapcount to filter out PTE mapped case. 
*/ static inline int PageTransCompoundMap(struct page *page) { - return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; + struct page *head; + + if (!PageTransCompound(page)) + return 0; + + if (PageAnon(page)) + return atomic_read(&page->_mapcount) < 0; + + head = compound_head(page); + /* File THP is PMD mapped and not PTE mapped */ + return atomic_read(&page->_mapcount) == + atomic_read(compound_mapcount_ptr(head)); } /* diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h index e82c642fa53cd49ac88afc412b4194319fc3303f..5913be0793a2608b8bd553509b15d63018e5f807 100644 --- a/include/linux/platform_data/dma-ep93xx.h +++ b/include/linux/platform_data/dma-ep93xx.h @@ -84,7 +84,7 @@ static inline enum dma_transfer_direction ep93xx_dma_chan_direction(struct dma_chan *chan) { if (!ep93xx_dma_chan_is_m2p(chan)) - return DMA_NONE; + return DMA_TRANS_NONE; /* even channels are for TX, odd for RX */ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f00fa86ac9660ad79e243f9c9e590bf854dcb48d..87733344768c4a53bed321f6d1449d8bfc779527 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -21,7 +21,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* i_mutex must being held */ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || + return (ia->ia_valid & ATTR_SIZE) || (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4a6c87d19004c8c18e78cb16276da24b6481deee..fdde51a16c876873bfb5913b8e91c20c6853ab9c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1182,7 +1182,8 @@ static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 return skb->hash; } -__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); +__u32 skb_get_hash_perturb(const struct sk_buff *skb, + const siphash_key_t *perturb); static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) { diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 7ba040c797ec41ea41d46098bec89fba30daf603..da2791b5fe87939389bf1d703a2d04bd1d038602 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -185,7 +185,6 @@ struct rpc_timer { struct rpc_wait_queue { spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ - pid_t owner; /* process id of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char nr; /* # tasks remaining for cookie */ @@ -201,7 +200,6 @@ struct rpc_wait_queue { * from a single cookie. The aim is to improve * performance of NFS operations such as read/write. 
*/ -#define RPC_BATCH_COUNT 16 #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) /* diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d9534927d93bc46005afb5214ef83c72544385b7..1505cf7a4aafa4bbdb395f0e6ecefc34dd9022a5 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -3,6 +3,7 @@ #include #include +#include #include /** @@ -151,7 +152,7 @@ struct flow_dissector { struct flow_keys { struct flow_dissector_key_control control; #define FLOW_KEYS_HASH_START_FIELD basic - struct flow_dissector_key_basic basic; + struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT); struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_keyid keyid; diff --git a/include/net/fq.h b/include/net/fq.h index 6d8521a30c5cf3951a868aea9b7817ffda749de4..2c76879027895f49154ae1a5c2eba3ac6019b184 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -70,7 +70,7 @@ struct fq { struct list_head backlogs; spinlock_t lock; u32 flows_cnt; - u32 perturbation; + siphash_key_t perturbation; u32 limit; u32 memory_limit; u32 memory_usage; diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index 4e6131cd3f43dfb711049a1925abbea6074ec25b..45a0d9a006a0ea136f43b61938087e598978f92f 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq, lockdep_assert_held(&fq->lock); - hash = skb_get_hash_perturb(skb, fq->perturbation); + hash = skb_get_hash_perturb(skb, &fq->perturbation); idx = reciprocal_scale(hash, fq->flows_cnt); flow = &fq->flows[idx]; @@ -252,7 +252,7 @@ static int fq_init(struct fq *fq, int flows_cnt) INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); fq->flows_cnt = max_t(u32, flows_cnt, 1); - fq->perturbation = prandom_u32(); + get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index cd6018a9ee2467cec1fbe4d621fc294bc843a05e..a26165744d98031082ac5b0e8bb1d78f21f0267d 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -887,6 +887,7 @@ struct netns_ipvs { struct delayed_work defense_work; /* Work handler */ int drop_rate; int drop_counter; + int old_secure_tcp; atomic_t dropentry; /* locks in ctl.c */ spinlock_t dropentry_lock; /* drop entry handling */ diff --git a/include/net/llc.h b/include/net/llc.h index 82d989995d18a491425dceb744d3af98a2745b9b..95e5ced4c1339d9d92f476577bc5988d4c8d5ab0 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -66,6 +66,7 @@ struct llc_sap { int sk_count; struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES]; struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES]; + struct rcu_head rcu; }; static inline diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index df528a6235487d3678ffdccb28facf977a1e7098..ea985aa7a6c5e64b6802c2399ad47db23fdbd9bf 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); /* Access to a connection */ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); -int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); diff --git 
a/include/net/neighbour.h b/include/net/neighbour.h index bd82a2b4b888bc6d4f5190ba7b23378522272c5f..d770d1377abd74169e4273b624488a538bb2131d 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -426,8 +426,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { unsigned long now = jiffies; - if (neigh->used != now) - neigh->used = now; + if (READ_ONCE(neigh->used) != now) + WRITE_ONCE(neigh->used, now); if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) return __neigh_event_send(neigh, skb); return 0; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 66f6b84df2873fbc5984ff9583ad4eda6f9bfe98..7ba9a624090fbd344b48866a0925c11a242c7ada 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -705,7 +705,8 @@ struct nft_expr_ops { */ struct nft_expr { const struct nft_expr_ops *ops; - unsigned char data[]; + unsigned char data[] + __attribute__((aligned(__alignof__(u64)))); }; static inline void *nft_expr_priv(const struct nft_expr *expr) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 538f3c4458b0425c232de6ac06c9319f95201935..5d5a137b9067f2196adbae2265b1524c655c47f7 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -276,6 +276,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) return q; } +static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc) +{ + return rcu_dereference_bh(qdisc->dev_queue->qdisc); +} + static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) { return qdisc->dev_queue->qdisc_sleeping; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 579ded2c6ef18860bb6bdc68f630bce1cacdd46f..32db5208854dc3b081f8ff3551f34c314a1209a8 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int); /* * sctp/socket.c */ +int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags); int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); diff --git a/include/net/sock.h b/include/net/sock.h index 5375467a2024a8846d3b02a86933dcbb0e63aaa5..0d7198e09c7925e15dcdd39a43a05f13ebb3ad5a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2052,12 +2052,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, * sk_page_frag - return an appropriate page_frag * @sk: socket * - * If socket allocation mode allows current thread to sleep, it means its - * safe to use the per task page_frag instead of the per socket one. + * Use the per task page_frag instead of the per socket one for + * optimization when we know that we're in the normal context and owns + * everything that's associated with %current. + * + * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest + * inside other socket operations and end up recursing into sk_page_frag() + * while it's already in use. 
*/ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if (gfpflags_allow_blocking(sk->sk_allocation)) + if (gfpflags_normal_context(sk->sk_allocation)) return ¤t->task_frag; return &sk->sk_frag; @@ -2144,7 +2149,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk) return kt; #else - return sk->sk_stamp; + return READ_ONCE(sk->sk_stamp); #endif } @@ -2155,7 +2160,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) sk->sk_stamp = kt; write_sequnlock(&sk->sk_stamp_seq); #else - sk->sk_stamp = kt; + WRITE_ONCE(sk->sk_stamp, kt); #endif } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a42535f252b5713ce4f661bd4cada6282211c35b..1d9c701b1c7b3da7cf4efce3efc169c8b81918f7 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -63,6 +63,7 @@ extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; +extern struct workqueue_struct *ib_comp_unbound_wq; union ib_gid { u8 raw[16]; @@ -1025,7 +1026,7 @@ struct ib_qp_init_attr { struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; - enum ib_qp_create_flags create_flags; + u32 create_flags; /* * Only needed for special QP types, or when using the RW API. @@ -1415,9 +1416,10 @@ struct ib_ah { typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); enum ib_poll_context { - IB_POLL_DIRECT, /* caller context, no hw completions */ - IB_POLL_SOFTIRQ, /* poll from softirq context */ - IB_POLL_WORKQUEUE, /* poll from workqueue */ + IB_POLL_DIRECT, /* caller context, no hw completions */ + IB_POLL_SOFTIRQ, /* poll from softirq context */ + IB_POLL_WORKQUEUE, /* poll from workqueue */ + IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */ }; struct ib_cq { @@ -1434,6 +1436,7 @@ struct ib_cq { struct irq_poll iop; struct work_struct work; }; + struct workqueue_struct *comp_wq; }; struct ib_srq { diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h index 56710e03101c6d854b42685bb529461666ec9800..1fcf14aee28a87bc3c406cf120cbdf2fa6d43f93 100644 --- a/include/scsi/scsi_dbg.h +++ b/include/scsi/scsi_dbg.h @@ -5,8 +5,6 @@ struct scsi_cmnd; struct scsi_device; struct scsi_sense_hdr; -#define SCSI_LOG_BUFSIZE 128 - extern void scsi_print_command(struct scsi_cmnd *); extern size_t __scsi_format_command(char *, size_t, const unsigned char *, size_t); diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index f0da77a337d351dc210a125da13c789df4c1b2c2..b4dd1a015c9468909a6bd096eda2195e165dd86d 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -344,6 +344,8 @@ struct device; #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */ #define SND_SOC_DAPM_PRE_POST_PMD \ (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD) +#define SND_SOC_DAPM_PRE_POST_PMU \ + (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU) /* convenience event type detection */ #define SND_SOC_DAPM_EVENT_ON(e) \ diff --git a/include/sound/timer.h b/include/sound/timer.h index c4d76ff056c6efd3431a4ed7aa3c6f6888c69e80..7ae226ab6990832d64c43935dded32463e739790 100644 --- a/include/sound/timer.h +++ b/include/sound/timer.h @@ -90,6 +90,8 @@ struct snd_timer { struct list_head ack_list_head; struct list_head sack_list_head; /* slow ack list head */ struct tasklet_struct task_queue; + int max_instances; /* upper limit of timer instances */ + int num_instances; /* current number of timer instances */ }; struct snd_timer_instance { diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h index 
b58635f722dace7879713ea36406ede4bb1f4dd4..ae1e1fba2e13f432fbf278b403d6983a0404ec87 100644 --- a/include/uapi/linux/isdn/capicmd.h +++ b/include/uapi/linux/isdn/capicmd.h @@ -15,6 +15,7 @@ #define CAPI_MSG_BASELEN 8 #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2) #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2) +#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4) /*----- CAPI commands -----*/ #define CAPI_ALERT 0x01 diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 360a8bcf795eef41cc579f9a78960d332e77bdbf..864943fe9260b337c819f9aa0b8d1591aaf7be4f 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -110,6 +110,7 @@ #define IPA_IOCTL_GSB_DISCONNECT 62 #define IPA_IOCTL_GET_PHERIPHERAL_EP_INFO 63 #define IPA_IOCTL_GET_NAT_IN_SRAM_INFO 64 +#define IPA_IOCTL_APP_CLOCK_VOTE 65 /** * max size of the header to be inserted @@ -2309,6 +2310,10 @@ struct ipa_ioc_bridge_vlan_mapping_info { IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \ struct ipa_nat_in_sram_info) +#define IPA_IOC_APP_CLOCK_VOTE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_APP_CLOCK_VOTE, \ + uint32_t) + /* * unique magic number of the Tethering bridge ioctls */ @@ -2414,6 +2419,18 @@ struct ipa_nat_in_sram_info { uint32_t best_nat_in_sram_size_rqst; }; +/** + * enum ipa_app_clock_vote_type + * + * The types of votes that can be accepted by the + * IPA_IOC_APP_CLOCK_VOTE ioctl + */ +enum ipa_app_clock_vote_type { + IPA_APP_CLK_DEVOTE = 0, + IPA_APP_CLK_VOTE = 1, + IPA_APP_CLK_RESET_VOTE = 2, +}; + #define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \ TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \ struct teth_ioc_set_bridge_mode *) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 6039aa774f6e2f3bd164baf8fc9ea5891ba9c928..1f685051b2e9ce986bdf9a7e4f8557e2e0c8c3ff 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1096,7 +1096,7 @@ static void audit_log_execve_info(struct audit_context *context, } /* write as much as we can to the audit log */ - if (len_buf > 0) { + if (len_buf >= 0) { /* NOTE: some magic numbers here - basically if we * can't fit a reasonable amount of data into the * existing audit buffer, flush it and start with diff --git a/kernel/cpu.c b/kernel/cpu.c index f102c4d6e952b50dd04ac06915d9c3b632d9c022..81a78c22bbc9d0d1d21ebbe16dcaa2349d6ba83c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -385,6 +385,7 @@ void __init cpu_smt_disable(bool force) pr_info("SMT: Force disabled\n"); cpu_smt_control = CPU_SMT_FORCE_DISABLED; } else { + pr_info("SMT: disabled\n"); cpu_smt_control = CPU_SMT_DISABLED; } } @@ -2312,7 +2313,18 @@ void __init boot_cpu_hotplug_init(void) this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); } -enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; +/* + * These are used for a global "mitigations=" cmdline option for toggling + * optional CPU mitigations. 
+ */ +enum cpu_mitigations { + CPU_MITIGATIONS_OFF, + CPU_MITIGATIONS_AUTO, + CPU_MITIGATIONS_AUTO_NOSMT, +}; + +static enum cpu_mitigations cpu_mitigations __ro_after_init = + CPU_MITIGATIONS_AUTO; static int __init mitigations_parse_cmdline(char *arg) { @@ -2330,6 +2342,20 @@ static int __init mitigations_parse_cmdline(char *arg) } early_param("mitigations", mitigations_parse_cmdline); +/* mitigations=off */ +bool cpu_mitigations_off(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_OFF; +} +EXPORT_SYMBOL_GPL(cpu_mitigations_off); + +/* mitigations=auto,nosmt */ +bool cpu_mitigations_auto_nosmt(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; +} +EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt); + static ATOMIC_NOTIFIER_HEAD(idle_notifier); void idle_notifier_register(struct notifier_block *n) diff --git a/kernel/elfcore.c b/kernel/elfcore.c index e556751d15d944d3ee51706bacb6d431979fe541..a2b29b9bdfcb29e13acc1273f22812d089dd054c 100644 --- a/kernel/elfcore.c +++ b/kernel/elfcore.c @@ -2,6 +2,7 @@ #include #include #include +#include Elf_Half __weak elf_core_extra_phdrs(void) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index fbfab5722254ab86be1945bbf4088bf3d04b6bc8..8ddd29476c0dcf31d047bc2258170cf93014688a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1846,7 +1846,7 @@ static void handle_trampoline(struct pt_regs *regs) sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); - force_sig_info(SIGILL, SEND_SIG_FORCED, current); + force_sig(SIGILL, current); } @@ -1962,7 +1962,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) if (unlikely(err)) { uprobe_warn(current, "execute the probed insn, sending SIGILL."); - force_sig_info(SIGILL, SEND_SIG_FORCED, current); + force_sig(SIGILL, current); } } diff --git a/kernel/fork.c b/kernel/fork.c index 42ec3d660cab58c39aa6a11612fc2a00a02c2d38..04e239cdcfaa23731498afc88a2c333fe7e082ae 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2513,7 +2513,7 @@ int sysctl_max_threads(struct ctl_table *table, int write, struct ctl_table t; int ret; int threads = max_threads; - int min = MIN_THREADS; + int min = 1; int max = MAX_THREADS; t = *table; @@ -2525,7 +2525,7 @@ int sysctl_max_threads(struct ctl_table *table, int write, if (ret || !write) return ret; - set_max_threads(threads); + max_threads = threads; return 0; } diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index b86886beee4f2e21c0f3dacf0aed0370ce79ebcc..867fb0ed4aa62a55cb829543a9500255bc6c2f16 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -37,6 +37,8 @@ static void resend_irqs(unsigned long arg) irq = find_first_bit(irqs_resend, nr_irqs); clear_bit(irq, irqs_resend); desc = irq_to_desc(irq); + if (!desc) + continue; local_irq_disable(); desc->handle_irq(desc); local_irq_enable(); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e2845dd53b30ba8f8d80ac3f33260fdeef56f902..1b75fb8c7735e55de3e457f20fa385c01fe44e67 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -514,8 +514,14 @@ static void do_free_cleaned_kprobes(void) struct optimized_kprobe *op, *tmp; list_for_each_entry_safe(op, tmp, &freeing_list, list) { - BUG_ON(!kprobe_unused(&op->kp)); list_del_init(&op->list); + if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { + /* + * This must not happen, but if there is a kprobe + * still in use, keep it on kprobes hash list. 
+ */ + continue; + } free_aggr_kprobe(&op->kp); } } @@ -1454,7 +1460,8 @@ static int check_kprobe_address_safe(struct kprobe *p, /* Ensure it is not in reserved area nor out of text */ if (!kernel_text_address((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || - jump_label_text_reserved(p->addr, p->addr)) { + jump_label_text_reserved(p->addr, p->addr) || + find_bug((unsigned long)p->addr)) { ret = -EINVAL; goto out; } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 4b27aaffdf3529c538754566022cfff59336ddfb..d7f425698a4a1d15e763a1a262a2a2f75f55a052 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3446,6 +3446,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name, unsigned int depth; int i; + if (unlikely(!debug_locks)) + return 0; + depth = curr->lockdep_depth; /* * This function is about (re)setting the class of a held lock, diff --git a/kernel/panic.c b/kernel/panic.c index bebc117e30dd4bb216e6c7cb908f56c4ed8b820e..f9c25b23e7a23d89c97aa72698ae05ddc0c3545a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -149,6 +149,7 @@ void panic(const char *fmt, ...) * after setting panic_cpu) from invoking panic() again. */ local_irq_disable(); + preempt_disable_notrace(); /* * It's possible to come here directly from a panic-assertion and diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index f4c93ce56e5a3093e10784e2a7b14159eb3316e0..f0d02d46a1116f44cf5acbccfb6a1c9a269738ee 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -360,7 +360,6 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; static u32 syslog_idx; -static enum log_flags syslog_prev; static size_t syslog_partial; /* index and sequence number of the first record stored in the buffer */ @@ -374,7 +373,6 @@ static u32 log_next_idx; /* the next printk record to write to the console */ static u64 console_seq; static u32 console_idx; -static enum log_flags console_prev; /* the next printk record to read after the last 'clear' command */ static u64 clear_seq; @@ -389,6 +387,7 @@ static u32 clear_idx; /* record buffer */ #define LOG_ALIGN __alignof__(struct printk_log) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) +#define LOG_BUF_LEN_MAX (u32)(1 << 31) static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; @@ -643,27 +642,15 @@ static void append_char(char **pp, char *e, char c) } static ssize_t msg_print_ext_header(char *buf, size_t size, - struct printk_log *msg, u64 seq, - enum log_flags prev_flags) + struct printk_log *msg, u64 seq) { u64 ts_usec = msg->ts_nsec; - char cont = '-'; do_div(ts_usec, 1000); - /* - * If we couldn't merge continuation line fragments during the print, - * export the stored flags to allow an optional external merge of the - * records. Merging the records isn't always neccessarily correct, like - * when we hit a race during printing. In most cases though, it produces - * better readable output. 'c' in the record flags mark the first - * fragment of a line, '+' the following. - */ - if (msg->flags & LOG_CONT) - cont = (prev_flags & LOG_CONT) ? '+' : 'c'; - return scnprintf(buf, size, "%u,%llu,%llu,%c;", - (msg->facility << 3) | msg->level, seq, ts_usec, cont); + (msg->facility << 3) | msg->level, seq, ts_usec, + msg->flags & LOG_CONT ? 
'c' : '-'); } static ssize_t msg_print_ext_body(char *buf, size_t size, @@ -718,7 +705,6 @@ static ssize_t msg_print_ext_body(char *buf, size_t size, struct devkmsg_user { u64 seq; u32 idx; - enum log_flags prev; struct ratelimit_state rs; struct mutex lock; char buf[CONSOLE_EXT_LOG_MAX]; @@ -828,12 +814,11 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, msg = log_from_idx(user->idx); len = msg_print_ext_header(user->buf, sizeof(user->buf), - msg, user->seq, user->prev); + msg, user->seq); len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len, log_dict(msg), msg->dict_len, log_text(msg), msg->text_len); - user->prev = msg->flags; user->idx = log_next(user->idx); user->seq++; raw_spin_unlock_irq(&logbuf_lock); @@ -1003,18 +988,23 @@ void log_buf_kexec_setup(void) static unsigned long __initdata new_log_buf_len; /* we practice scaling the ring buffer by powers of 2 */ -static void __init log_buf_len_update(unsigned size) +static void __init log_buf_len_update(u64 size) { + if (size > (u64)LOG_BUF_LEN_MAX) { + size = (u64)LOG_BUF_LEN_MAX; + pr_err("log_buf over 2G is not supported.\n"); + } + if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) - new_log_buf_len = size; + new_log_buf_len = (unsigned long)size; } /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { - unsigned int size; + u64 size; if (!str) return -EINVAL; @@ -1064,7 +1054,7 @@ void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; - int free; + unsigned int free; if (log_buf != __log_buf) return; @@ -1084,7 +1074,7 @@ void __init setup_log_buf(int early) } if (unlikely(!new_log_buf)) { - pr_err("log_buf_len: %ld bytes not available\n", + pr_err("log_buf_len: %lu bytes not available\n", new_log_buf_len); return; } @@ -1097,8 +1087,8 @@ void __init setup_log_buf(int early) memcpy(log_buf, __log_buf, __LOG_BUF_LEN); raw_spin_unlock_irqrestore(&logbuf_lock, flags); - pr_info("log_buf_len: %d bytes\n", log_buf_len); - pr_info("early log buf free: %d(%d%%)\n", + pr_info("log_buf_len: %u bytes\n", log_buf_len); + pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); } @@ -1219,26 +1209,12 @@ static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf) return len; } -static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev, - bool syslog, char *buf, size_t size) +static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size) { const char *text = log_text(msg); size_t text_size = msg->text_len; - bool prefix = true; - bool newline = true; size_t len = 0; - if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)) - prefix = false; - - if (msg->flags & LOG_CONT) { - if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE)) - prefix = false; - - if (!(msg->flags & LOG_NEWLINE)) - newline = false; - } - do { const char *next = memchr(text, '\n', text_size); size_t text_len; @@ -1256,22 +1232,17 @@ static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev, text_len + 1 >= size - len) break; - if (prefix) - len += print_prefix(msg, syslog, buf + len); + len += print_prefix(msg, syslog, buf + len); memcpy(buf + len, text, text_len); len += text_len; - if (next || newline) - buf[len++] = '\n'; + buf[len++] = '\n'; } else { /* SYSLOG_ACTION_* buffer size only calculation */ - if (prefix) - len += print_prefix(msg, syslog, NULL); + len += print_prefix(msg, syslog, NULL); len += text_len; - if (next || 
newline) - len++; + len++; } - prefix = true; text = next; } while (text); @@ -1297,7 +1268,6 @@ static int syslog_print(char __user *buf, int size) /* messages are gone, move to first one */ syslog_seq = log_first_seq; syslog_idx = log_first_idx; - syslog_prev = 0; syslog_partial = 0; } if (syslog_seq == log_next_seq) { @@ -1307,13 +1277,11 @@ static int syslog_print(char __user *buf, int size) skip = syslog_partial; msg = log_from_idx(syslog_idx); - n = msg_print_text(msg, syslog_prev, true, text, - LOG_LINE_MAX + PREFIX_MAX); + n = msg_print_text(msg, true, text, LOG_LINE_MAX + PREFIX_MAX); if (n - syslog_partial <= size) { /* message fits into buffer, move forward */ syslog_idx = log_next(syslog_idx); syslog_seq++; - syslog_prev = msg->flags; n -= syslog_partial; syslog_partial = 0; } else if (!len){ @@ -1356,7 +1324,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) u64 next_seq; u64 seq; u32 idx; - enum log_flags prev; /* * Find first record that fits, including all following records, @@ -1364,12 +1331,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear) */ seq = clear_seq; idx = clear_idx; - prev = 0; while (seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); - len += msg_print_text(msg, prev, true, NULL, 0); - prev = msg->flags; + len += msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; } @@ -1377,12 +1342,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear) /* move first record forward until length fits into the buffer */ seq = clear_seq; idx = clear_idx; - prev = 0; while (len > size && seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); - len -= msg_print_text(msg, prev, true, NULL, 0); - prev = msg->flags; + len -= msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; } @@ -1395,7 +1358,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct printk_log *msg = log_from_idx(idx); int textlen; - textlen = msg_print_text(msg, prev, true, text, + textlen = msg_print_text(msg, true, text, LOG_LINE_MAX + PREFIX_MAX); if (textlen < 0) { len = textlen; @@ -1403,7 +1366,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) } idx = log_next(idx); seq++; - prev = msg->flags; raw_spin_unlock_irq(&logbuf_lock); if (copy_to_user(buf + len, text, textlen)) @@ -1416,7 +1378,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) /* messages are gone, move to next one */ seq = log_first_seq; idx = log_first_idx; - prev = 0; } } } @@ -1517,7 +1478,6 @@ int do_syslog(int type, char __user *buf, int len, int source) /* messages are gone, move to first one */ syslog_seq = log_first_seq; syslog_idx = log_first_idx; - syslog_prev = 0; syslog_partial = 0; } if (source == SYSLOG_FROM_PROC) { @@ -1530,16 +1490,14 @@ int do_syslog(int type, char __user *buf, int len, int source) } else { u64 seq = syslog_seq; u32 idx = syslog_idx; - enum log_flags prev = syslog_prev; error = 0; while (seq < log_next_seq) { struct printk_log *msg = log_from_idx(idx); - error += msg_print_text(msg, prev, true, NULL, 0); + error += msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; - prev = msg->flags; } error -= syslog_partial; } @@ -1721,7 +1679,7 @@ static size_t cont_print_text(char *text, size_t size) size_t textlen = 0; size_t len; - if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) { + if (cont.cons == 0) { textlen += print_time(cont.ts_nsec, text); size -= textlen; } @@ -1993,11 +1951,9 @@ static u64 syslog_seq; static u32 
syslog_idx; static u64 console_seq; static u32 console_idx; -static enum log_flags syslog_prev; static u64 log_first_seq; static u32 log_first_idx; static u64 log_next_seq; -static enum log_flags console_prev; static struct cont { size_t len; size_t cons; @@ -2009,15 +1965,15 @@ static char *log_dict(const struct printk_log *msg) { return NULL; } static struct printk_log *log_from_idx(u32 idx) { return NULL; } static u32 log_next(u32 idx) { return 0; } static ssize_t msg_print_ext_header(char *buf, size_t size, - struct printk_log *msg, u64 seq, - enum log_flags prev_flags) { return 0; } + struct printk_log *msg, + u64 seq) { return 0; } static ssize_t msg_print_ext_body(char *buf, size_t size, char *dict, size_t dict_len, char *text, size_t text_len) { return 0; } static void call_console_drivers(int level, const char *ext_text, size_t ext_len, const char *text, size_t len) {} -static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev, +static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size) { return 0; } static size_t cont_print_text(char *text, size_t size) { return 0; } static bool suppress_message_printing(int level) { return false; } @@ -2409,7 +2365,6 @@ void console_unlock(void) /* messages are gone, move to first one */ console_seq = log_first_seq; console_idx = log_first_idx; - console_prev = 0; } else { len = 0; } @@ -2434,16 +2389,14 @@ void console_unlock(void) * will properly dump everything later. */ msg->flags &= ~LOG_NOCONS; - console_prev = msg->flags; goto skip; } - len += msg_print_text(msg, console_prev, false, - text + len, sizeof(text) - len); + len += msg_print_text(msg, false, text + len, sizeof(text) - len); if (nr_ext_console_drivers) { ext_len = msg_print_ext_header(ext_text, sizeof(ext_text), - msg, console_seq, console_prev); + msg, console_seq); ext_len += msg_print_ext_body(ext_text + ext_len, sizeof(ext_text) - ext_len, log_dict(msg), msg->dict_len, @@ -2451,7 +2404,6 @@ void console_unlock(void) } console_idx = log_next(console_idx); console_seq++; - console_prev = msg->flags; raw_spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ @@ -2746,7 +2698,6 @@ void register_console(struct console *newcon) raw_spin_lock_irqsave(&logbuf_lock, flags); console_seq = syslog_seq; console_idx = syslog_idx; - console_prev = syslog_prev; raw_spin_unlock_irqrestore(&logbuf_lock, flags); /* * We're about to replay the log buffer. 
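The log_buf_len_update() change earlier in this printk series clamps any requested size to a 2 GiB ceiling and rounds it up to a power of two before it is remembered for the later allocation. A minimal userspace sketch of that sizing rule; the cap value, the current-buffer size and the helper names here are assumptions for illustration, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define LOG_BUF_LEN_MAX ((uint32_t)1 << 31)	/* assumed 2 GiB cap */

/* round up to the next power of two, like the kernel's roundup_pow_of_two() */
static uint64_t roundup_pow_of_two64(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

static uint32_t log_buf_len = 1u << 17;		/* current static buffer, e.g. 128 KiB */
static unsigned long new_log_buf_len;

static void log_buf_len_update(uint64_t size)
{
	if (size > (uint64_t)LOG_BUF_LEN_MAX) {
		size = (uint64_t)LOG_BUF_LEN_MAX;
		fprintf(stderr, "log_buf over 2G is not supported.\n");
	}

	if (size)
		size = roundup_pow_of_two64(size);

	if (size > log_buf_len)
		new_log_buf_len = (unsigned long)size;
}

int main(void)
{
	log_buf_len_update(3 * 1024 * 1024);	/* rounds up to 4 MiB */
	log_buf_len_update((uint64_t)5 << 30);	/* clamped to the 2 GiB cap */
	printf("new_log_buf_len = %lu\n", new_log_buf_len);
	return 0;
}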
Only do this to the @@ -3098,7 +3049,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, goto out; msg = log_from_idx(dumper->cur_idx); - l = msg_print_text(msg, 0, syslog, line, size); + l = msg_print_text(msg, syslog, line, size); dumper->cur_idx = log_next(dumper->cur_idx); dumper->cur_seq++; @@ -3167,7 +3118,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, u32 idx; u64 next_seq; u32 next_idx; - enum log_flags prev; size_t l = 0; bool ret = false; @@ -3190,27 +3140,23 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, /* calculate length of entire buffer */ seq = dumper->cur_seq; idx = dumper->cur_idx; - prev = 0; while (seq < dumper->next_seq) { struct printk_log *msg = log_from_idx(idx); - l += msg_print_text(msg, prev, true, NULL, 0); + l += msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; - prev = msg->flags; } /* move first record forward until length fits into the buffer */ seq = dumper->cur_seq; idx = dumper->cur_idx; - prev = 0; - while (l > size && seq < dumper->next_seq) { + while (l >= size && seq < dumper->next_seq) { struct printk_log *msg = log_from_idx(idx); - l -= msg_print_text(msg, prev, true, NULL, 0); + l -= msg_print_text(msg, true, NULL, 0); idx = log_next(idx); seq++; - prev = msg->flags; } /* last message in next interation */ @@ -3221,10 +3167,9 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, while (seq < dumper->next_seq) { struct printk_log *msg = log_from_idx(idx); - l += msg_print_text(msg, prev, syslog, buf + l, size - l); + l += msg_print_text(msg, syslog, buf + l, size - l); idx = log_next(idx); seq++; - prev = msg->flags; } dumper->next_seq = next_seq; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ef1bd778f088beae7d53e0f3e89c9f84f9182aa5..f288da4c2b62843d36f32ea3cc3dc4294d9dc057 100755 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9197,10 +9197,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; -#else - /* We don't support RT-tasks being in separate groups */ - if (task->sched_class != &fair_sched_class) - return -EINVAL; #endif /* * Serialize against wake_up_new_task() such that if its diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c old mode 100644 new mode 100755 index ba9dce429bbe7ea237cf93e71126f127d3164d08..26ea5c8130ecec1fb8a0c8515f683dd1467c9b60 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1938,6 +1938,7 @@ const struct sched_class dl_sched_class = { .update_curr = update_curr_dl, #ifdef CONFIG_SCHED_WALT .fixup_walt_sched_stats = fixup_walt_sched_stats_common, + .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg, #endif }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c old mode 100644 new mode 100755 index 3027a5837402bfd456d0c4dc67e279730b39a2cd..40e7d106cf85ea16a5777964d8fbb17f036f4cca --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4279,6 +4279,23 @@ static int tg_throttle_down(struct task_group *tg, void *data) return 0; } +#ifdef CONFIG_SCHED_WALT +static inline void walt_propagate_cumulative_runnable_avg(u64 *accumulated, + u64 value, bool add) +{ + if (add) + *accumulated += value; + else + *accumulated -= value; +} +#else +/* + * Provide a nop definition since cumulative_runnable_avg is not + * available in rq or cfs_rq when WALT is not enabled. + */ +#define walt_propagate_cumulative_runnable_avg(...) 
+#endif + static void throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); @@ -4305,6 +4322,9 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); qcfs_rq->h_nr_running -= task_delta; walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq); + walt_propagate_cumulative_runnable_avg( + &qcfs_rq->cumulative_runnable_avg, + cfs_rq->cumulative_runnable_avg, false); if (qcfs_rq->load.weight) dequeue = 0; @@ -4313,6 +4333,9 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { sub_nr_running(rq, task_delta); walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq); + walt_propagate_cumulative_runnable_avg( + &rq->cumulative_runnable_avg, + cfs_rq->cumulative_runnable_avg, false); } cfs_rq->throttled = 1; @@ -4376,6 +4399,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); cfs_rq->h_nr_running += task_delta; walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq); + walt_propagate_cumulative_runnable_avg( + &cfs_rq->cumulative_runnable_avg, + tcfs_rq->cumulative_runnable_avg, true); if (cfs_rq_throttled(cfs_rq)) break; @@ -4384,6 +4410,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { add_nr_running(rq, task_delta); walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq); + walt_propagate_cumulative_runnable_avg( + &rq->cumulative_runnable_avg, + tcfs_rq->cumulative_runnable_avg, true); } /* determine whether we need to wake up potentially idle cpu */ @@ -4827,6 +4856,30 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) } } +#ifdef CONFIG_SCHED_WALT +static void walt_fixup_cumulative_runnable_avg_fair(struct rq *rq, + struct task_struct *p, + u64 new_task_load) +{ + struct cfs_rq *cfs_rq; + struct sched_entity *se = &p->se; + s64 task_load_delta = (s64)new_task_load - p->ravg.demand; + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + + cfs_rq->cumulative_runnable_avg += task_load_delta; + if (cfs_rq_throttled(cfs_rq)) + break; + } + + /* Fix up rq only if we didn't find any throttled cfs_rq */ + if (!se) + walt_fixup_cumulative_runnable_avg(rq, p, new_task_load); +} + +#endif /* CONFIG_SCHED_WALT */ + #else /* CONFIG_CFS_BANDWIDTH */ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) { @@ -4869,6 +4922,9 @@ static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} static inline void update_runtime_enabled(struct rq *rq) {} static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} +#define walt_fixup_cumulative_runnable_avg_fair \ + walt_fixup_cumulative_runnable_avg + #endif /* CONFIG_CFS_BANDWIDTH */ /************************************************** @@ -10367,9 +10423,10 @@ static int load_balance(int this_cpu, struct rq *this_rq, out_balanced: /* * We reach balance although we may have faced some affinity - * constraints. Clear the imbalance flag if it was set. + * constraints. Clear the imbalance flag only if other tasks got + * a chance to move and fix the imbalance. */ - if (sd_parent) { + if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if (*group_imbalance) @@ -10387,13 +10444,22 @@ static int load_balance(int this_cpu, struct rq *this_rq, sd->nr_balance_failed = 0; out_one_pinned: + ld_moved = 0; + + /* + * idle_balance() disregards balance intervals, so we could repeatedly + * reach this code, which would lead to balance_interval skyrocketting + * in a short amount of time. 
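As the comment goes on to say, the cure is to skip the interval growth entirely for newly-idle balancing. A standalone sketch of that backoff rule; the pared-down struct and the cap standing in for MAX_PINNED_INTERVAL are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PINNED_LIKE 128	/* assumed cap, arbitrary units */

struct sd_sketch {
	unsigned int balance_interval;
	unsigned int max_interval;
};

/*
 * Grow the balancing interval after an unsuccessful attempt, but skip the
 * growth for newly-idle balancing: idle balancing ignores the interval, so
 * it could otherwise keep doubling it in a very short amount of time.
 */
static void tune_balance_interval(struct sd_sketch *sd, bool newly_idle,
				  bool all_pinned)
{
	if (newly_idle)
		return;

	if ((all_pinned && sd->balance_interval < MAX_PINNED_LIKE) ||
	    sd->balance_interval < sd->max_interval)
		sd->balance_interval *= 2;
}

int main(void)
{
	struct sd_sketch sd = { .balance_interval = 8, .max_interval = 64 };

	tune_balance_interval(&sd, true, false);	/* newly idle: stays at 8 */
	tune_balance_interval(&sd, false, false);	/* regular pass: doubles to 16 */
	printf("balance_interval = %u\n", sd.balance_interval);
	return 0;
}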
Skip the balance_interval increase logic + * to avoid that. + */ + if (env.idle == CPU_NEWLY_IDLE) + goto out; + /* tune up the balancing interval */ if (((env.flags & LBF_ALL_PINNED) && sd->balance_interval < MAX_PINNED_INTERVAL) || (sd->balance_interval < sd->max_interval)) sd->balance_interval *= 2; - - ld_moved = 0; out: trace_sched_load_balance(this_cpu, idle, *continue_balancing, group ? group->cpumask[0] : 0, @@ -11725,6 +11791,8 @@ const struct sched_class fair_sched_class = { #endif #ifdef CONFIG_SCHED_WALT .fixup_walt_sched_stats = walt_fixup_sched_stats_fair, + .fixup_cumulative_runnable_avg = + walt_fixup_cumulative_runnable_avg_fair, #endif }; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index ced758792dedbcfc8c0b170b4be6b0841e9fcc55..5fdbc2353f30d1498cad3ef4843ab20ded6a1285 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, if (!rcu_access_pointer(group->poll_kworker)) { struct sched_param param = { - .sched_priority = MAX_RT_PRIO - 1, + .sched_priority = 1, }; struct kthread_worker *kworker; @@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, mutex_unlock(&group->trigger_lock); return ERR_CAST(kworker); } - sched_setscheduler(kworker->task, SCHED_FIFO, ¶m); + sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m); kthread_init_delayed_work(&group->poll_work, psi_poll_work); rcu_assign_pointer(group->poll_kworker, kworker); @@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref) * deadlock while waiting for psi_poll_work to acquire trigger_lock */ if (kworker_to_destroy) { + /* + * After the RCU grace period has expired, the worker + * can no longer be found through group->poll_kworker. + * But it might have been already scheduled before + * that - deschedule it cleanly before destroying it. 
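The trigger-creation hunk just above also drops the polling kworker from MAX_RT_PRIO-1 down to the lowest real-time priority and applies it with the _nocheck variant. A userspace analogue of asking for SCHED_FIFO priority 1; this needs CAP_SYS_NICE, so it may simply report a failure when run unprivileged:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };	/* lowest FIFO priority */

	/* pid 0 means the calling thread; requires CAP_SYS_NICE to succeed */
	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1)
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
	else
		printf("now SCHED_FIFO, priority %d\n", param.sched_priority);
	return 0;
}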
+ */ kthread_cancel_delayed_work_sync(&group->poll_work); + atomic_set(&group->poll_scheduled, 0); + kthread_destroy_worker(kworker_to_destroy); } kfree(t); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c old mode 100644 new mode 100755 index 1d704a46c20cb4b141785fba3d69f71ded31cfbe..4e56c0859a5efbf41a3a4d276130a8dcbba00ae6 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2649,6 +2649,7 @@ const struct sched_class rt_sched_class = { .update_curr = update_curr_rt, #ifdef CONFIG_SCHED_WALT .fixup_walt_sched_stats = fixup_walt_sched_stats_common, + .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg, #endif }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h old mode 100644 new mode 100755 index e8faa552b684b57efc2bca546f0aa34f5f1412c2..430140ec046da18ff8d7bccab66d8ca3e108e182 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1566,6 +1566,9 @@ struct sched_class { #ifdef CONFIG_SCHED_WALT void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p, u32 new_task_load, u32 new_pred_demand); + void (*fixup_cumulative_runnable_avg)(struct rq *rq, + struct task_struct *task, + u64 new_task_load); #endif }; diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c old mode 100644 new mode 100755 index a6ce92c0ffaf575cfdd1290f1626b91973e44a2e..8e7b7c12338c97c5ecbf825726bfc80788d8516f --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -139,5 +139,6 @@ const struct sched_class stop_sched_class = { .update_curr = update_curr_stop, #ifdef CONFIG_SCHED_WALT .fixup_walt_sched_stats = fixup_walt_sched_stats_common, + .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg, #endif }; diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c old mode 100644 new mode 100755 index c34311bb30cd5922f5560c6b44f74ae30165b5aa..0050ca56b2699d4b2f46e98325e41158728e9ac1 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -53,6 +53,21 @@ u64 walt_load_reported_window; static struct irq_work walt_cpufreq_irq_work; static struct irq_work walt_migration_irq_work; +void +walt_fixup_cumulative_runnable_avg(struct rq *rq, + struct task_struct *p, u64 new_task_load) +{ + s64 task_load_delta = (s64)new_task_load - task_load(p); + struct walt_sched_stats *stats = &rq->walt_stats; + + stats->cumulative_runnable_avg += task_load_delta; + if ((s64)stats->cumulative_runnable_avg < 0) + panic("cra less than zero: tld: %lld, task_load(p) = %u\n", + task_load_delta, task_load(p)); + + walt_fixup_cum_window_demand(rq, task_load_delta); +} + u64 sched_ktime_clock(void) { if (unlikely(sched_ktime_suspended)) diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h old mode 100644 new mode 100755 index 7d505c300015f3b618a927961334650677f2d871..9671e56b24d9a30f33d7d9f615fa1332f195c07d --- a/kernel/sched/walt.h +++ b/kernel/sched/walt.h @@ -155,6 +155,11 @@ extern void mark_task_starting(struct task_struct *p); extern void set_window_start(struct rq *rq); void account_irqtime(int cpu, struct task_struct *curr, u64 delta, u64 wallclock); +void walt_fixup_cumulative_runnable_avg(struct rq *rq, struct task_struct *p, + u64 new_task_load); + + + extern bool do_pl_notif(struct rq *rq); #define SCHED_HIGH_IRQ_TIMEOUT 3 @@ -304,6 +309,9 @@ static inline void walt_sched_init(struct rq *rq) { } static inline void walt_rotate_work_init(void) { } static inline void walt_rotation_checkpoint(int nr_big) { } static inline void walt_update_last_enqueue(struct task_struct *p) { } +static inline void walt_fixup_cumulative_runnable_avg(struct rq *rq, + struct task_struct *p, + u64 
new_task_load) { } static inline void update_task_ravg(struct task_struct *p, struct rq *rq, int event, u64 wallclock, u64 irqtime) { } diff --git a/kernel/signal.c b/kernel/signal.c index 0b65a3e0d71957981814a87b7040a57c791219ee..77d638270c43f312da92d7aeb17d7949e9178e25 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -75,6 +75,10 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force) handler = sig_handler(t, sig); + /* SIGKILL and SIGSTOP may not be sent to the global init */ + if (unlikely(is_global_init(t) && sig_kernel_only(sig))) + return true; + if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && handler == SIG_DFL && !(force && sig_kernel_only(sig))) return 1; diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index aa25aac580148e005fd56c6273fc5dcd2f49558a..1c2dc98ba63df048336cac26309e7ea42f7c3f9d 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -573,7 +573,7 @@ static int alarm_timer_create(struct k_itimer *new_timer) enum alarmtimer_type type; if (!alarmtimer_get_rtcdev()) - return -ENOTSUPP; + return -EOPNOTSUPP; if (!capable(CAP_WAKE_ALARM)) return -EPERM; @@ -615,7 +615,7 @@ static void alarm_timer_get(struct k_itimer *timr, static int alarm_timer_del(struct k_itimer *timr) { if (!rtcdev) - return -ENOTSUPP; + return -EOPNOTSUPP; if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0) return TIMER_RETRY; @@ -639,7 +639,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, ktime_t exp; if (!rtcdev) - return -ENOTSUPP; + return -EOPNOTSUPP; if (flags & ~TIMER_ABSTIME) return -EINVAL; @@ -801,7 +801,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, struct restart_block *restart; if (!alarmtimer_get_rtcdev()) - return -ENOTSUPP; + return -EOPNOTSUPP; if (flags & ~TIMER_ABSTIME) return -EINVAL; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 018e2f5946efb44db670d9737eda3548baa3e601..97da9247cb38072d7ba5a6a421894239039c948e 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1596,21 +1596,23 @@ void timer_clear_idle(void) static int collect_expired_timers(struct timer_base *base, struct hlist_head *heads) { + unsigned long now = READ_ONCE(jiffies); + /* * NOHZ optimization. After a long idle sleep we need to forward the * base to current jiffies. Avoid a loop by searching the bitfield for * the next expiring timer. */ - if ((long)(jiffies - base->clk) > 2) { + if ((long)(now - base->clk) > 2) { unsigned long next = __next_timer_interrupt(base); /* * If the next timer is ahead of time forward to current * jiffies, otherwise forward to the next expiry time: */ - if (time_after(next, jiffies)) { + if (time_after(next, now)) { /* The call site will increment clock! 
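The collect_expired_timers() hunk here snapshots jiffies once so that both comparisons operate on the same value even if the tick advances mid-function. The same idea as a userspace sketch; the variable names are invented, a C11 atomic load stands in for READ_ONCE() and a plain comparison stands in for the wraparound-safe time_after():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* shared tick counter, advanced concurrently (e.g. by a timer thread) */
static _Atomic unsigned long jiffies_like = 1000;

static unsigned long base_clk = 990;

static bool forward_base(unsigned long next_expiry)
{
	/*
	 * Read the counter once; using the shared variable directly in both
	 * tests below could see two different values if it ticks in between.
	 */
	unsigned long now = atomic_load_explicit(&jiffies_like, memory_order_relaxed);

	if ((long)(now - base_clk) > 2) {
		if (next_expiry > now) {	/* next timer is in the future */
			base_clk = now - 1;
			return false;
		}
		base_clk = next_expiry;
	}
	return true;
}

int main(void)
{
	if (forward_base(1010))
		printf("base forwarded to next expiry\n");
	else
		printf("no timer due yet, base_clk = %lu\n", base_clk);
	return 0;
}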
*/ - base->clk = jiffies - 1; + base->clk = now - 1; return 0; } base->clk = next; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e9216ed32562670341c6e6a1e1fc5fc3158b963a..e1e11bbb175c9155da5460c78a782a1580847f8a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3773,9 +3773,14 @@ static int show_traces_open(struct inode *inode, struct file *file) if (tracing_disabled) return -ENODEV; + if (trace_array_get(tr) < 0) + return -ENODEV; + ret = seq_open(file, &show_traces_seq_ops); - if (ret) + if (ret) { + trace_array_put(tr); return ret; + } m = file->private_data; m->private = tr; @@ -3783,6 +3788,14 @@ static int show_traces_open(struct inode *inode, struct file *file) return 0; } +static int show_traces_release(struct inode *inode, struct file *file) +{ + struct trace_array *tr = inode->i_private; + + trace_array_put(tr); + return seq_release(inode, file); +} + static ssize_t tracing_write_stub(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) @@ -3813,8 +3826,8 @@ static const struct file_operations tracing_fops = { static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, - .release = seq_release, .llseek = seq_lseek, + .release = show_traces_release, }; static ssize_t @@ -5357,6 +5370,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); + trace_seq_init(&iter->seq); iter->pos = -1; trace_event_read_lock(); diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index f00b0131c8f9577ce2efe36ee19fc1a8b7a8283c..5fe23f0ee7db6c2418551d8025a1533c54c84e2e 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -151,7 +151,7 @@ void trace_hwlat_callback(bool enter) if (enter) nmi_ts_start = time_get(); else - nmi_total_ts = time_get() - nmi_ts_start; + nmi_total_ts += time_get() - nmi_ts_start; } if (enter) @@ -257,6 +257,8 @@ static int get_sample(void) /* Keep a running maximum ever recorded hardware latency */ if (sample > tr->max_latency) tr->max_latency = sample; + if (outer_sample > tr->max_latency) + tr->max_latency = outer_sample; } out: diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 639f799219ec93f598ae8307f25633403923686d..27e14c0b93a3af8f5967117d689fae5ec5431f1b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -557,7 +557,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE int "Maximum kmemleak early log entries" depends on DEBUG_KMEMLEAK range 200 40000 - default 400 + default 16000 help Kmemleak must track all the memory allocations to avoid reporting false positives. Since memory may be allocated or diff --git a/lib/dump_stack.c b/lib/dump_stack.c index c30d07e99dba4cc32be6aeb4d353d79058509b4b..72de6444934dc23731ee336328025a3e3ade92bd 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -44,7 +44,12 @@ asmlinkage __visible void dump_stack(void) was_locked = 1; } else { local_irq_restore(flags); - cpu_relax(); + /* + * Wait for the lock to release before jumping to + * atomic_cmpxchg() in order to mitigate the thundering herd + * problem. 
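To avoid a thundering herd, the loop added just below spins on a plain read until the lock looks free and only then retries the atomic cmpxchg. A compact userspace model of that acquire path, using C11 atomics in place of the kernel primitives; the recursive same-CPU case handled by the real code is left out:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_lock = -1;	/* -1 means unlocked, otherwise the owner id */

static void lock_for_dump(int me)
{
	int expected;

retry:
	expected = -1;
	if (!atomic_compare_exchange_strong(&dump_lock, &expected, me)) {
		/*
		 * Lost the race: wait for the owner to release the lock
		 * before retrying the compare-and-swap, instead of having
		 * every waiter hammer the cache line with failed swaps.
		 */
		while (atomic_load(&dump_lock) != -1)
			;	/* a cpu_relax() equivalent would go here */
		goto retry;
	}
}

static void unlock_for_dump(void)
{
	atomic_store(&dump_lock, -1);
}

int main(void)
{
	lock_for_dump(0);
	printf("lock owner: %d\n", atomic_load(&dump_lock));
	unlock_for_dump();
	return 0;
}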
+ */ + do { cpu_relax(); } while (atomic_read(&dump_lock) != -1); goto retry; } diff --git a/mm/filemap.c b/mm/filemap.c index 1a5780bbec95a30942fb73eeee4e8d500055e69b..1aaebed9d20d485ef6d78e486bcdec7bec8638f9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -389,7 +389,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, .range_end = end, }; - if (!mapping_cap_writeback_dirty(mapping)) + if (!mapping_cap_writeback_dirty(mapping) || + !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) return 0; wbc_attach_fdatawrite_inode(&wbc, mapping->host); diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index eec1150125b9857f3f84185d1d5beeff64ec034f..e430e04997eed69ac07466ad6a7dbac2b12d8d1c 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, again: rcu_read_lock(); h_cg = hugetlb_cgroup_from_task(current); - if (!css_tryget_online(&h_cg->css)) { + if (!css_tryget(&h_cg->css)) { rcu_read_unlock(); goto again; } diff --git a/mm/ksm.c b/mm/ksm.c index 8450f9b5b0f5725f34b3a24cc3d0b69dba4e36d4..34b64fb48a8bf0d04b73fbe24b5cbeadeabd2121 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -728,13 +728,13 @@ static int remove_stable_node(struct stable_node *stable_node) return 0; } - if (WARN_ON_ONCE(page_mapped(page))) { - /* - * This should not happen: but if it does, just refuse to let - * merge_across_nodes be switched - there is no need to panic. - */ - err = -EBUSY; - } else { + /* + * Page could be still mapped if this races with __mmput() running in + * between ksm_exit() and exit_mmap(). Just refuse to let + * merge_across_nodes/max_page_sharing be switched. + */ + err = -EBUSY; + if (!page_mapped(page)) { /* * The stable node did not yet appear stale to get_ksm_page(), * since that allows for an unmapped ksm page to be recognized diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cb882488d6b1124c9a1ed84c4b6c3a933857f68e..0ec91c8270f3e9357c2be03f65c33ea687e041f9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -741,7 +741,7 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) if (unlikely(!memcg)) memcg = root_mem_cgroup; } - } while (!css_tryget_online(&memcg->css)); + } while (!css_tryget(&memcg->css)); rcu_read_unlock(); return memcg; } @@ -2344,6 +2344,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { + + /* + * Enforce __GFP_NOFAIL allocation because callers are not + * prepared to see failures and likely do not have any failure + * handling code. + */ + if (gfp & __GFP_NOFAIL) { + page_counter_charge(&memcg->kmem, nr_pages); + return 0; + } cancel_charge(memcg, nr_pages); return -ENOMEM; } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3c9c56268821dab7dfc03e5bfb60d2a577ae212b..68d009b904155e3777a2b3a4dddbaba63d87fbb7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1344,7 +1344,12 @@ static int online_memory_block(struct memory_block *mem, void *arg) return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); } -/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ +/* + * NOTE: The caller must call lock_device_hotplug() to serialize hotplug + * and online/offline operations (triggered e.g. by sysfs). 
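The hunk that follows makes this split concrete: __add_memory() assumes the lock is already held, while add_memory() becomes a small wrapper that takes and releases it. Reduced to a userspace sketch, with a pthread mutex standing in for device_hotplug_lock and invented function bodies:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Requires device_hotplug_lock: callers such as ACPI/sysfs paths already hold it. */
static int __add_memory_sketch(int nid, unsigned long long start, unsigned long long size)
{
	printf("adding %llu bytes at 0x%llx to node %d\n", size, start, nid);
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock yet. */
static int add_memory_sketch(int nid, unsigned long long start, unsigned long long size)
{
	int rc;

	pthread_mutex_lock(&device_hotplug_lock);
	rc = __add_memory_sketch(nid, start, size);
	pthread_mutex_unlock(&device_hotplug_lock);

	return rc;
}

int main(void)
{
	return add_memory_sketch(0, 0x100000000ULL, 1ULL << 30);
}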
+ * + * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG + */ int __ref add_memory_resource(int nid, struct resource *res, bool online) { u64 start, size; @@ -1422,9 +1427,9 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) mem_hotplug_done(); return ret; } -EXPORT_SYMBOL_GPL(add_memory_resource); -int __ref add_memory(int nid, u64 start, u64 size) +/* requires device_hotplug_lock, see add_memory_resource() */ +int __ref __add_memory(int nid, u64 start, u64 size) { struct resource *res; int ret; @@ -1438,6 +1443,17 @@ int __ref add_memory(int nid, u64 start, u64 size) release_memory_resource(res); return ret; } + +int add_memory(int nid, u64 start, u64 size) +{ + int rc; + + lock_device_hotplug(); + rc = __add_memory(nid, start, size); + unlock_device_hotplug(); + + return rc; +} EXPORT_SYMBOL_GPL(add_memory); #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c047ff55207a4d2b880672a73307a8c471ab0556..f683f30e964c384b7436a42457d16694fb914e05 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2141,6 +2141,13 @@ EXPORT_SYMBOL(tag_pages_for_writeback); * not miss some pages (e.g., because some other process has cleared TOWRITE * tag we set). The rule we follow is that TOWRITE tag can be cleared only * by the process clearing the DIRTY tag (and submitting the page for IO). + * + * To avoid deadlocks between range_cyclic writeback and callers that hold + * pages in PageWriteback to aggregate IO until write_cache_pages() returns, + * we do not loop back to the start of the file. Doing so causes a page + * lock/page writeback access order inversion - we should only ever lock + * multiple pages in ascending page->index order, and looping back to the start + * of the file violates that rule and causes deadlocks. */ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, @@ -2155,7 +2162,6 @@ int write_cache_pages(struct address_space *mapping, pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; - int cycled; int range_whole = 0; int tag; @@ -2163,23 +2169,17 @@ int write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; - if (index == 0) - cycled = 1; - else - cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; - cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; -retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; @@ -2271,17 +2271,14 @@ int write_cache_pages(struct address_space *mapping, pagevec_release(&pvec); cond_resched(); } - if (!cycled && !done) { - /* - * range_cyclic: - * We hit the last page and there is more work to be done: wrap - * back to the start of the file - */ - cycled = 1; - index = 0; - end = writeback_index - 1; - goto retry; - } + + /* + * If we hit the last page and there is more work to be done: wrap + * back the index back to the start of the file for the next + * time we are called. 
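In other words, a range_cyclic pass now stops at end-of-file and merely records index 0 as the resume point for the next call, rather than wrapping around inside the same call. Roughly, as a userspace sketch over an array of dirty flags; the names and the budget-based stop condition are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 8

static bool dirty[NR_PAGES] = { [2] = true, [5] = true, [7] = true };
static unsigned long writeback_index;	/* where the next cyclic pass resumes */

/* write out up to 'budget' dirty pages starting at writeback_index, no wrap */
static void writeback_pass(int budget)
{
	unsigned long index = writeback_index;
	unsigned long done_index = index;
	bool done = false;

	while (index < NR_PAGES && budget > 0) {
		if (dirty[index]) {
			printf("writing page %lu\n", index);
			dirty[index] = false;
			budget--;
		}
		done_index = index + 1;
		index++;
	}

	if (budget == 0)
		done = true;

	/* hit the end with work left: resume from the start next time */
	if (!done)
		done_index = 0;

	writeback_index = done_index;
}

int main(void)
{
	writeback_pass(2);	/* writes pages 2 and 5, resumes at 6 next time */
	writeback_pass(2);	/* writes page 7, then wraps the resume point to 0 */
	printf("next pass starts at %lu\n", writeback_index);
	return 0;
}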
+ */ + if (wbc->range_cyclic && !done) + done_index = 0; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; diff --git a/mm/shmem.c b/mm/shmem.c index 793a8cf784080d9980920f9223e06a8e2cb10285..f4af71c39674deb0696636c093d46cea83c1f5a7 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2457,31 +2457,33 @@ static void shmem_tag_pins(struct address_space *mapping) void **slot; pgoff_t start; struct page *page; + unsigned int tagged = 0; lru_add_drain(); start = 0; - rcu_read_lock(); + spin_lock_irq(&mapping->tree_lock); radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { - page = radix_tree_deref_slot(slot); + page = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); if (!page || radix_tree_exception(page)) { if (radix_tree_deref_retry(page)) { slot = radix_tree_iter_retry(&iter); continue; } } else if (page_count(page) - page_mapcount(page) > 1) { - spin_lock_irq(&mapping->tree_lock); radix_tree_tag_set(&mapping->page_tree, iter.index, SHMEM_TAG_PINNED); - spin_unlock_irq(&mapping->tree_lock); } - if (need_resched()) { - cond_resched_rcu(); - slot = radix_tree_iter_next(&iter); - } + if (++tagged % 1024) + continue; + + slot = radix_tree_iter_next(&iter); + spin_unlock_irq(&mapping->tree_lock); + cond_resched(); + spin_lock_irq(&mapping->tree_lock); } - rcu_read_unlock(); + spin_unlock_irq(&mapping->tree_lock); } /* diff --git a/mm/slub.c b/mm/slub.c index b5c9fde0d7b700ab6d2f4e8434bb0e3c9c800172..fc12c5b1637caa941faeb31c60f83b83254b07fa 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4748,7 +4748,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s, } } - get_online_mems(); + /* + * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" + * already held which will conflict with an existing lock order: + * + * mem_hotplug_lock->slab_mutex->kernfs_mutex + * + * We don't really need mem_hotplug_lock (to hold off + * slab_mem_going_offline_callback) here because slab's memory hot + * unplug code doesn't destroy the kmem_cache->node[] data. + */ + #ifdef CONFIG_SLUB_DEBUG if (flags & SO_ALL) { struct kmem_cache_node *n; @@ -4789,7 +4799,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s, x += sprintf(buf + x, " N%d=%lu", node, nodes[node]); #endif - put_online_mems(); kfree(nodes); return x + sprintf(buf + x, "\n"); } diff --git a/mm/usercopy.c b/mm/usercopy.c index 27ae932c03cf5202084bb04971b0389a46f9ee34..9bc43f83cee489307ebcae9d5e98efcee023baa0 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include @@ -216,7 +217,12 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n, if (!virt_addr_valid(ptr)) return NULL; - page = virt_to_head_page(ptr); + /* + * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the + * highmem page or fallback to virt_to_page(). The following + * is effectively a highmem-aware virt_to_head_page(). + */ + page = compound_head(kmap_to_page((void *)ptr)); /* Check slab allocator for flags and size. 
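The shmem_tag_pins() rework a little further up walks the tree under tree_lock but releases the lock and reschedules every 1024 entries, so the walk can neither monopolize the lock nor the CPU. The same batching idiom in userspace form, with a pthread mutex and sched_yield() standing in for the spinlock and cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_ITEMS 5000
#define BATCH    1024

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NR_ITEMS];

static void tag_pinned_items(void)
{
	unsigned int i, tagged = 0;

	pthread_mutex_lock(&tree_lock);
	for (i = 0; i < NR_ITEMS; i++) {
		items[i] = 1;		/* "tag" the entry while holding the lock */

		if (++tagged % BATCH)
			continue;

		/* every BATCH entries: let others take the lock and the CPU */
		pthread_mutex_unlock(&tree_lock);
		sched_yield();
		pthread_mutex_lock(&tree_lock);
	}
	pthread_mutex_unlock(&tree_lock);

	printf("tagged %u items\n", tagged);
}

int main(void)
{
	tag_pinned_items();
	return 0;
}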
*/ if (PageSlab(page)) diff --git a/mm/vmstat.c b/mm/vmstat.c index 98811ced3b568e3ac5d67fece84f0dd347b2da9d..67c22932408683f3a2e4b699d7471cf0965ed97e 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1790,7 +1790,7 @@ static int __init setup_vmstat(void) #endif #ifdef CONFIG_PROC_FS proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); + proc_create("pagetypeinfo", 0400, NULL, &pagetypeinfo_file_ops); proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); #endif diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index e206d98b3b822970961e3a3823f7b4740ff5342d..d74092cc639a2071bcf5d820f138e0dc1880dba7 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1029,6 +1029,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol, */ if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) goto out; + + rc = -EPERM; + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) + goto out; + rc = -ENOMEM; sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern); if (!sk) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 2772f6a13fcb4f9de50bc7abfee712aa46ecf4e9..de55a3f001dc4ade5beb42fd800590fad273f219 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -859,6 +859,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, break; case SOCK_RAW: + if (!capable(CAP_NET_RAW)) + return -EPERM; break; default: return -ESOCKTNOSUPPORT; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 1aeeadca620cd090ce763ef017cb01f6a2ec2890..f435435b447e09e1d9aec773134d244104577d92 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -618,17 +618,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm2_packet: potential OGM2 in buffer * * Return: true if there is enough space for another OGM, false otherwise. 
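The rework of this helper in the hunk below validates the aggregate in two steps: first that a full fixed-size header fits in what remains of the packet, and only then that the variable-length TVLV data it advertises also fits. A generic userspace sketch of that pattern for walking back-to-back length-prefixed records; the record layout is invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs()/htons() */

struct record_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t payload_len;	/* big endian, length of data after the header */
} __attribute__((packed));

/* true if a complete record (header plus payload) starts at offset buff_pos */
static bool record_fits(size_t buff_pos, size_t packet_len, const uint8_t *buf)
{
	const struct record_hdr *hdr;
	size_t next = buff_pos + sizeof(*hdr);

	/* step 1: is there room for the fixed header itself? */
	if (next > packet_len)
		return false;

	hdr = (const struct record_hdr *)(buf + buff_pos);

	/* step 2: only now is it safe to read the advertised payload length */
	next += ntohs(hdr->payload_len);

	return next <= packet_len;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct record_hdr hdr = { .type = 1, .flags = 0, .payload_len = htons(4) };

	memcpy(buf, &hdr, sizeof(hdr));
	printf("first record fits: %s\n", record_fits(0, sizeof(buf), buf) ? "yes" : "no");
	printf("truncated packet:  %s\n", record_fits(0, 5, buf) ? "yes" : "no");
	return 0;
}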
*/ -static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool +batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm2_packet *ogm2_packet) { int next_buff_pos = 0; - next_buff_pos += buff_pos + BATADV_OGM2_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm2_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm2_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@ -775,7 +781,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, ogm_packet = (struct batadv_ogm2_packet *)skb->data; while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_v_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM2_HLEN; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 163a239bda91a10f6c9183ce637574ff1f29fc07..6f78489fdb132806f9ea18a9aad394e896ead3d9 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5089,11 +5089,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_UNKNOWN_CONN_ID); - if (min < hcon->le_conn_min_interval || - max > hcon->le_conn_max_interval) - return send_conn_param_neg_reply(hdev, handle, - HCI_ERROR_INVALID_LL_PARAMS); - if (hci_check_conn_params(min, max, latency, timeout)) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 4912e80dacefaae8ca297f15f24981e2653a9cb5..1306962a792afc2304df5899427c8b742bb96350 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -5277,14 +5277,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, memset(&rsp, 0, sizeof(rsp)); - if (min < hcon->le_conn_min_interval || - max > hcon->le_conn_max_interval) { - BT_DBG("requested connection interval exceeds current bounds."); - err = -EINVAL; - } else { - err = hci_check_conn_params(min, max, latency, to_multiplier); - } - + err = hci_check_conn_params(min, max, latency, to_multiplier); if (err) rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else @@ -6818,6 +6811,16 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) chan->sdu_len = sdu_len; chan->sdu_last_frag = skb; + /* Detect if remote is not able to use the selected MPS */ + if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { + u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; + + /* Adjust the number of credits */ + BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); + chan->mps = mps_len; + l2cap_chan_le_send_credits(chan); + } + return 0; } diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 6406010e155bbcbc4e69b4c30b572a1d8231c296..7007683973b48ae209b69351142cc54f897ec9f3 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -372,7 +372,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb, struct nlmsghdr *nlh; struct nlattr *nest; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); if (!nlh) return -EMSGSIZE; diff --git a/net/core/datagram.c b/net/core/datagram.c index 146502f310cefbbe6cb990d9b286d6aea1c22dd1..619c63a74594daa0ef5b97be0bdd0b6b92add905 100644 --- a/net/core/datagram.c +++ 
b/net/core/datagram.c @@ -96,7 +96,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, if (error) goto out_err; - if (sk->sk_receive_queue.prev != skb) + if (READ_ONCE(sk->sk_receive_queue.prev) != skb) goto out; /* Socket shut down? */ diff --git a/net/core/dev.c b/net/core/dev.c index 7bce57ba5e47ce64b5c9df249410a7075695612b..69ac821ad6f460db88bbca136ff03b4ca99e12a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3030,7 +3030,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de } skb = next; - if (netif_xmit_stopped(txq) && skb) { + if (netif_tx_queue_stopped(txq) && skb) { rc = NETDEV_TX_BUSY; break; } @@ -7485,6 +7485,8 @@ int register_netdevice(struct net_device *dev) ret = notifier_to_errno(ret); if (ret) { rollback_registered(dev); + rcu_barrier(); + dev->reg_state = NETREG_UNREGISTERED; } /* diff --git a/net/core/ethtool.c b/net/core/ethtool.c index ffe7b03c9ab5543768e53108f77b10e7ead0df43..454f73fcb3a692610ee056eae504dff7f13a42c1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1438,11 +1438,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr) static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) { - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct ethtool_wolinfo wol; if (!dev->ethtool_ops->get_wol) return -EOPNOTSUPP; + memset(&wol, 0, sizeof(struct ethtool_wolinfo)); + wol.cmd = ETHTOOL_GWOL; dev->ethtool_ops->get_wol(dev, &wol); if (copy_to_user(useraddr, &wol, sizeof(wol))) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ab7c50026cae9edb5941b82f5e46c5b7b587fbbc..26b0f70d2f1c963f5c2a761cc173c80553c56ef8 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -563,45 +563,34 @@ bool __skb_flow_dissect(const struct sk_buff *skb, } EXPORT_SYMBOL(__skb_flow_dissect); -static u32 hashrnd __read_mostly; +static siphash_key_t hashrnd __read_mostly; static __always_inline void __flow_hash_secret_init(void) { net_get_random_once(&hashrnd, sizeof(hashrnd)); } -static __always_inline u32 __flow_hash_words(const u32 *words, u32 length, - u32 keyval) +static const void *flow_keys_hash_start(const struct flow_keys *flow) { - return jhash2(words, length, keyval); -} - -static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow) -{ - const void *p = flow; - - BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); - return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET); + BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT); + return &flow->FLOW_KEYS_HASH_START_FIELD; } static inline size_t flow_keys_hash_length(const struct flow_keys *flow) { - size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); - BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); - BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != - sizeof(*flow) - sizeof(flow->addrs)); + size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET; switch (flow->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - diff -= sizeof(flow->addrs.v4addrs); + len += sizeof(flow->addrs.v4addrs); break; case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - diff -= sizeof(flow->addrs.v6addrs); + len += sizeof(flow->addrs.v6addrs); break; case FLOW_DISSECTOR_KEY_TIPC_ADDRS: - diff -= sizeof(flow->addrs.tipcaddrs); + len += sizeof(flow->addrs.tipcaddrs); break; } - return (sizeof(*flow) - diff) / sizeof(u32); + return len; } __be32 flow_get_u32_src(const struct flow_keys *flow) @@ -667,14 +656,15 @@ static inline void 
__flow_hash_consistentify(struct flow_keys *keys) } } -static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) +static inline u32 __flow_hash_from_keys(struct flow_keys *keys, + const siphash_key_t *keyval) { u32 hash; __flow_hash_consistentify(keys); - hash = __flow_hash_words(flow_keys_hash_start(keys), - flow_keys_hash_length(keys), keyval); + hash = siphash(flow_keys_hash_start(keys), + flow_keys_hash_length(keys), keyval); if (!hash) hash = 1; @@ -684,12 +674,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) u32 flow_hash_from_keys(struct flow_keys *keys) { __flow_hash_secret_init(); - return __flow_hash_from_keys(keys, hashrnd); + return __flow_hash_from_keys(keys, &hashrnd); } EXPORT_SYMBOL(flow_hash_from_keys); static inline u32 ___skb_get_hash(const struct sk_buff *skb, - struct flow_keys *keys, u32 keyval) + struct flow_keys *keys, + const siphash_key_t *keyval) { skb_flow_dissect_flow_keys(skb, keys, FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); @@ -737,7 +728,7 @@ u32 __skb_get_hash_symmetric(struct sk_buff *skb) NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); - return __flow_hash_from_keys(&keys, hashrnd); + return __flow_hash_from_keys(&keys, &hashrnd); } EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); @@ -757,13 +748,14 @@ void __skb_get_hash(struct sk_buff *skb) __flow_hash_secret_init(); - hash = ___skb_get_hash(skb, &keys, hashrnd); + hash = ___skb_get_hash(skb, &keys, &hashrnd); __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); } EXPORT_SYMBOL(__skb_get_hash); -__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb) +__u32 skb_get_hash_perturb(const struct sk_buff *skb, + const siphash_key_t *perturb) { struct flow_keys keys; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ba724576764e333ca9e0007af05de62867eac374..ead1a32c68f71db8b6bb28ff153795535651e068 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1724,6 +1724,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_MAC]) { struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); + if (ivm->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_mac) err = ops->ndo_set_vf_mac(dev, ivm->vf, @@ -1735,6 +1737,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_VLAN]) { struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); + if (ivv->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_vlan) err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, @@ -1767,6 +1771,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (len == 0) return -EINVAL; + if (ivvl[0]->vf >= INT_MAX) + return -EINVAL; err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, ivvl[0]->qos, ivvl[0]->vlan_proto); if (err < 0) @@ -1777,6 +1783,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); struct ifla_vf_info ivf; + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_get_vf_config) err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); @@ -1795,6 +1803,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_RATE]) { struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_rate) err = ops->ndo_set_vf_rate(dev, ivt->vf, @@ -1807,6 +1817,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if 
(tb[IFLA_VF_SPOOFCHK]) { struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); + if (ivs->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_spoofchk) err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, @@ -1818,6 +1830,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_LINK_STATE]) { struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); + if (ivl->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_link_state) err = ops->ndo_set_vf_link_state(dev, ivl->vf, @@ -1831,6 +1845,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) err = -EOPNOTSUPP; ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); + if (ivrssq_en->vf >= INT_MAX) + return -EINVAL; if (ops->ndo_set_vf_rss_query_en) err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, ivrssq_en->setting); @@ -1841,6 +1857,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_TRUST]) { struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_trust) err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); @@ -1851,15 +1869,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_IB_NODE_GUID]) { struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); + if (ivt->vf >= INT_MAX) + return -EINVAL; if (!ops->ndo_set_vf_guid) return -EOPNOTSUPP; - return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); } if (tb[IFLA_VF_IB_PORT_GUID]) { struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); + if (ivt->vf >= INT_MAX) + return -EINVAL; if (!ops->ndo_set_vf_guid) return -EOPNOTSUPP; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 57743e5d324fbc3ed2e57b5cb734c08246a163de..3d91d43882748e540b8661864b690d05b3420233 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3100,6 +3100,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int pos; int dummy; + if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && + (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { + /* gso_size is untrusted, and we have a frag_list with a linear + * non head_frag head. + * + * (we assume checking the first list_skb member suffices; + * i.e if either of the list_skb members have non head_frag + * head, then the first one has too). + * + * If head_skb's headlen does not fit requested gso_size, it + * means that the frag_list members do NOT terminate on exact + * gso_size boundaries. Hence we cannot perform skb_frag_t page + * sharing. Therefore we must fallback to copying the frag_list + * skbs; we do so by disabling SG. 
+ */ + if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) + features &= ~NETIF_F_SG; + } + __skb_push(head_skb, doffset); proto = skb_network_protocol(head_skb, &dummy); if (unlikely(!proto)) diff --git a/net/core/sock.c b/net/core/sock.c index 1d31d71c7442ae5f9f9da5878be9dbce6f825ade..2d78b7c18e649be9f022730b2142753a2979323c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1434,8 +1434,6 @@ static void __sk_destruct(struct rcu_head *head) sk_filter_uncharge(sk, filter); RCU_INIT_POINTER(sk->sk_filter, NULL); } - if (rcu_access_pointer(sk->sk_reuseport_cb)) - reuseport_detach_sock(sk); sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); @@ -1458,7 +1456,14 @@ static void __sk_destruct(struct rcu_head *head) void sk_destruct(struct sock *sk) { - if (sock_flag(sk, SOCK_RCU_FREE)) + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); + + if (rcu_access_pointer(sk->sk_reuseport_cb)) { + reuseport_detach_sock(sk); + use_call_rcu = true; + } + + if (use_call_rcu) call_rcu(&sk->sk_rcu, __sk_destruct); else __sk_destruct(&sk->sk_rcu); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 1d6d3aaa8c3dadbb9e85725b7eba6a1d42ce855c..322268b88fec39b1ba793d500d9d923d4503a077 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -121,7 +121,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_daddr, inet->inet_sport, inet->inet_dport); - inet->inet_id = dp->dccps_iss ^ jiffies; + inet->inet_id = prandom_u32(); err = dccp_connect(sk); rt = NULL; @@ -417,7 +417,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; - newinet->inet_id = jiffies; + newinet->inet_id = prandom_u32(); if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) goto put_and_exit; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 0f99297b2fb3517942bf74fee08ed3e61d65a4f0..f1792a847d0bec510fbd1f2607448eb45d851bd8 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -59,7 +59,7 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree) dst->tree = tree; dst->cpu_switch = -1; INIT_LIST_HEAD(&dst->list); - list_add_tail(&dsa_switch_trees, &dst->list); + list_add_tail(&dst->list, &dsa_switch_trees); kref_init(&dst->refcount); return dst; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index bf5d26d83af064ef33c31c0e66887be67335f0c4..f66e4afb978a7059e80a7fa742fdf49b33c04e79 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -1003,6 +1003,9 @@ static int ieee802154_create(struct net *net, struct socket *sock, switch (sock->type) { case SOCK_RAW: + rc = -EPERM; + if (!capable(CAP_NET_RAW)) + goto out; proto = &ieee802154_raw_prot; ops = &ieee802154_raw_ops; break; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f915abff1350a86af8d5bb89725b751c061b0fb5..d3eddfd138753615623418b80345d2d9b397cb36 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -75,7 +75,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len inet->inet_dport = usin->sin_port; sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); - inet->inet_id = jiffies; + inet->inet_id = prandom_u32(); sk_dst_set(sk, &rt->dst); err = 0; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 90c654012510bf1f43c2e316cfeb92a08d419b18..6aec95e1fc134265ab72929733b0d1d96b36d22d 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1358,8 +1358,8 @@ int 
fib_sync_down_addr(struct net_device *dev, __be32 local) int ret = 0; unsigned int hash = fib_laddr_hashfn(local); struct hlist_head *head = &fib_info_laddrhash[hash]; + int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; struct net *net = dev_net(dev); - int tb_id = l3mdev_fib_table(dev); struct fib_info *fi; if (!fib_info_laddrhash || local == 0) diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index b798862b6be5da405c5aad288a39fdad5bb5aeae..7efe740c06ebff66d5e50f1cc067bc5973fc58e3 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -86,13 +86,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, options = (__be32 *)(greh + 1); if (greh->flags & GRE_CSUM) { - if (skb_checksum_simple_validate(skb)) { + if (!skb_checksum_simple_validate(skb)) { + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + } else if (csum_err) { *csum_err = true; return -EINVAL; } - skb_checksum_try_convert(skb, IPPROTO_GRE, 0, - null_compute_pseudo); options++; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 576f705d81809fccc61833f7445873620af6b4eb..9609ad71dd260a1162f6dd037010195c4f22f321 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -224,13 +224,10 @@ static void gre_err(struct sk_buff *skb, u32 info) const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; - bool csum_err = false; - if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), - iph->ihl * 4) < 0) { - if (!csum_err) /* ignore csum errors. */ - return; - } + if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP), + iph->ihl * 4) < 0) + return; if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0cca95545d899f85a5e57ec0d33b3e3971edf229..cbfa4fdb5e4c7a1bb44805571faf66ccaf9df130 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -907,16 +907,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) if (peer->rate_tokens == 0 || time_after(jiffies, (peer->rate_last + - (ip_rt_redirect_load << peer->rate_tokens)))) { + (ip_rt_redirect_load << peer->n_redirects)))) { __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); peer->rate_last = jiffies; - ++peer->rate_tokens; ++peer->n_redirects; #ifdef CONFIG_IP_ROUTE_VERBOSE if (log_martians && - peer->rate_tokens == ip_rt_redirect_number) + peer->n_redirects == ip_rt_redirect_number) net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", &ip_hdr(skb)->saddr, inet_iif(skb), &ip_hdr(skb)->daddr, &gw); @@ -2229,7 +2228,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, struct fib_result res; struct rtable *rth; int orig_oif; - int err = -ENETUNREACH; + int err; res.tclassid = 0; res.fi = NULL; @@ -2244,11 +2243,14 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, rcu_read_lock(); if (fl4->saddr) { - rth = ERR_PTR(-EINVAL); if (ipv4_is_multicast(fl4->saddr) || ipv4_is_lbcast(fl4->saddr) || - ipv4_is_zeronet(fl4->saddr)) + ipv4_is_zeronet(fl4->saddr)) { + rth = ERR_PTR(-EINVAL); goto out; + } + + rth = ERR_PTR(-ENETUNREACH); /* I removed check for oif == dev_out->oif here. 
It was wrong for two reasons: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 435450febca6d07d26ae372e66dd0081198ff363..82ed99740fe51aa8a412a8f9f453c0cdd38dd347 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -248,7 +248,7 @@ static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; } static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d6a95261b0e337c95f4b40703b2fd66eadd9d43c..f9ad41c82e3cdaa54c0cd23b6ff6e4c1629c7d4d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -240,7 +240,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_sport, usin->sin_port); - inet->inet_id = tp->write_seq ^ jiffies; + inet->inet_id = prandom_u32(); if (tcp_fastopen_defer_connect(sk, &err)) return err; @@ -1315,7 +1315,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; - newinet->inet_id = newtp->write_seq ^ jiffies; + newinet->inet_id = prandom_u32(); if (!dst) { dst = inet_csk_route_child_sock(sk, newsk, req); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 022ffe40c4da951297719bf4157d8dcdd032d289..162b0e3c82a9218a601b0bc98c87d8157e3466c9 100755 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5506,13 +5506,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) switch (event) { case RTM_NEWADDR: /* - * If the address was optimistic - * we inserted the route at the start of - * our DAD process, so we don't need - * to do it again + * If the address was optimistic we inserted the route at the + * start of our DAD process, so we don't need to do it again. + * If the device was taken down in the middle of the DAD + * cycle there is a race where we could get here without a + * host route, so nothing to insert. That will be fixed when + * the device is brought up. */ - if (!rcu_access_pointer(ifp->rt->rt6i_node)) + if (ifp->rt && !rcu_access_pointer(ifp->rt->rt6i_node)) { ip6_ins_rt(ifp->rt); + } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) { + pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n", + &ifp->addr, ifp->idev->dev->name); + } + if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); if (!ipv6_addr_any(&ifp->peer_addr)) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index aacfb4bce1533b3f3b38e1173c18cb1bb6b33099..e726a61ae6dc1bbc068d431ff796d5ec9e3cb16b 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -168,6 +168,16 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; + /* While RFC4291 is not explicit about v4mapped addresses + * in IPv6 headers, it seems clear linux dual-stack + * model can not deal properly with these. + * Security models could be fooled by ::ffff:127.0.0.1 for example. 
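The check added to ipv6_rcv() in this hunk drops packets whose IPv6 source is a v4-mapped address such as ::ffff:127.0.0.1, since dual-stack security checks can be confused by them. Spotting such an address from userspace, where IN6_IS_ADDR_V4MAPPED() is the standard test:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr saddr;
	const char *examples[] = { "::ffff:127.0.0.1", "2001:db8::1" };

	for (int i = 0; i < 2; i++) {
		if (inet_pton(AF_INET6, examples[i], &saddr) != 1)
			return 1;
		printf("%-18s v4-mapped: %s\n", examples[i],
		       IN6_IS_ADDR_V4MAPPED(&saddr) ? "yes (drop)" : "no");
	}
	return 0;
}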
+ * + * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 + */ + if (ipv6_addr_v4mapped(&hdr->saddr)) + goto err; + skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 2a965d4c24211d70be236369d34f3274b2306235..45c1a5b0358f8c87de3aad5fcf31d04927f4d87f 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -240,7 +240,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net) return ping_proc_register(net, &ping_v6_seq_afinfo); } -static void __net_init ping_v6_proc_exit_net(struct net *net) +static void __net_exit ping_v6_proc_exit_net(struct net *net) { return ping_proc_unregister(net, &ping_v6_seq_afinfo); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 47ca2a2f1cf88641025829631f0775ba8e2c15b5..16eba7b5f1a9db64d8c505dddaf811861a929427 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1856,7 +1856,6 @@ static int __net_init sit_init_net(struct net *net) err_reg_dev: ipip6_dev_free(sitn->fb_tunnel_dev); - free_netdev(sitn->fb_tunnel_dev); err_alloc_dev: return err; } diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 53b0eeb3fbf5976ebc7bca97c38dc10935af83dc..b63d50440a58b19943709955909c3df0901c10ac 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -116,6 +116,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; + struct iphdr *iph; int length; if (!pskb_may_pull(skb, 4)) @@ -174,24 +175,17 @@ static int l2tp_ip_recv(struct sk_buff *skb) goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) { - sk = tunnel->sock; - sock_hold(sk); - } else { - struct iphdr *iph = (struct iphdr *) skb_network_header(skb); - - read_lock_bh(&l2tp_ip_lock); - sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, - inet_iif(skb), tunnel_id); - if (!sk) { - read_unlock_bh(&l2tp_ip_lock); - goto discard; - } + iph = (struct iphdr *)skb_network_header(skb); - sock_hold(sk); + read_lock_bh(&l2tp_ip_lock); + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb), + tunnel_id); + if (!sk) { read_unlock_bh(&l2tp_ip_lock); + goto discard; } + sock_hold(sk); + read_unlock_bh(&l2tp_ip_lock); if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a007f112b69a1e90a95c9a61d0f71b01c0a98a26..af513e0204f433c16557c50d38eb40c443cc79a9 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -128,6 +128,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; + struct ipv6hdr *iph; int length; if (!pskb_may_pull(skb, 4)) @@ -186,24 +187,17 @@ static int l2tp_ip6_recv(struct sk_buff *skb) goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) { - sk = tunnel->sock; - sock_hold(sk); - } else { - struct ipv6hdr *iph = ipv6_hdr(skb); - - read_lock_bh(&l2tp_ip6_lock); - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, - inet6_iif(skb), tunnel_id); - if (!sk) { - read_unlock_bh(&l2tp_ip6_lock); - goto discard; - } + iph = ipv6_hdr(skb); - sock_hold(sk); + read_lock_bh(&l2tp_ip6_lock); + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, + inet6_iif(skb), tunnel_id); + if (!sk) { read_unlock_bh(&l2tp_ip6_lock); + goto discard; } + sock_hold(sk); + 
read_unlock_bh(&l2tp_ip6_lock); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 4b60f68cb4925251dc88a2c3d5f78f502e2e2437..8354ae40ec85b5380dd24d49749452b66fd884a5 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { + skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - rc = llc_conn_send_pdu(sk, skb); + skb_get(skb); + llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; @@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { + skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - rc = llc_conn_send_pdu(sk, skb); + skb_get(skb); + llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index b9290a183a2fd8017b286bdf575916f293733fee..94c78cc49d3e02acea615fd39cc7147158c52c36 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -30,7 +30,7 @@ #endif static int llc_find_offset(int state, int ev_type); -static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb); +static void llc_conn_send_pdus(struct sock *sk); static int llc_conn_service(struct sock *sk, struct sk_buff *skb); static int llc_exec_conn_trans_actions(struct sock *sk, struct llc_conn_state_trans *trans, @@ -193,11 +193,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) return rc; } -int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) +void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) { /* queue PDU to send to MAC layer */ skb_queue_tail(&sk->sk_write_queue, skb); - return llc_conn_send_pdus(sk, skb); + llc_conn_send_pdus(sk); } /** @@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk, NULL); + llc_conn_send_pdus(sk); out:; } @@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk, NULL); + llc_conn_send_pdus(sk); out:; } @@ -340,16 +340,12 @@ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked) /** * llc_conn_send_pdus - Sends queued PDUs * @sk: active connection - * @hold_skb: the skb held by caller, or NULL if does not care * - * Sends queued pdus to MAC layer for transmission. When @hold_skb is - * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent - * successfully, or 1 for failure. 
+ * Sends queued pdus to MAC layer for transmission. */ -static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) +static void llc_conn_send_pdus(struct sock *sk) { struct sk_buff *skb; - int ret = 0; while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); @@ -361,20 +357,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); if (!skb2) break; - dev_queue_xmit(skb2); - } else { - bool is_target = skb == hold_skb; - int rc; - - if (is_target) - skb_get(skb); - rc = dev_queue_xmit(skb); - if (is_target) - ret = rc; + skb = skb2; } + dev_queue_xmit(skb); } - - return ret; } /** diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index e896a2c53b1202d2d0265fd11fd2b0b542aa64f1..f1e442a39db8d9f37f547103c37991ca335dfb79 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -127,9 +127,7 @@ void llc_sap_close(struct llc_sap *sap) list_del_rcu(&sap->node); spin_unlock_bh(&llc_sap_list_lock); - synchronize_rcu(); - - kfree(sap); + kfree_rcu(sap, rcu); } static struct packet_type llc_packet_type __read_mostly = { diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index a94bd56bcac6f74110a0f231814fb45f4a600f30..7ae4cc684d3abe351a72110f3041ebe003063d26 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) + if (likely(!rc)) { + skb_get(skb); rc = dev_queue_xmit(skb); + } return rc; } @@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) + if (likely(!rc)) { + skb_get(skb); rc = dev_queue_xmit(skb); + } return rc; } @@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_test_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) + if (likely(!rc)) { + skb_get(skb); rc = dev_queue_xmit(skb); + } return rc; } diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 5404d0d195cc581613e356b75bd70321e617673e..d51ff9df9c959757a47371e0a1212876367021f7 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -197,29 +197,22 @@ static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb) * After executing actions of the event, upper layer will be indicated * if needed(on receiving an UI frame). sk can be null for the * datalink_proto case. + * + * This function always consumes a reference to the skb. */ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - /* - * We have to hold the skb, because llc_sap_next_state - * will kfree it in the sending path and we need to - * look at the skb->cb, where we encode llc_sap_state_ev. - */ - skb_get(skb); ev->ind_cfm_flag = 0; llc_sap_next_state(sap, skb); - if (ev->ind_cfm_flag == LLC_IND) { - if (skb->sk->sk_state == TCP_LISTEN) - kfree_skb(skb); - else { - llc_save_primitive(skb->sk, skb, ev->prim); - /* queue skb to the user. 
*/ - if (sock_queue_rcv_skb(skb->sk, skb)) - kfree_skb(skb); - } + if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) { + llc_save_primitive(skb->sk, skb, ev->prim); + + /* queue skb to the user. */ + if (sock_queue_rcv_skb(skb->sk, skb) == 0) + return; } kfree_skb(skb); } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index bcec1240f41d90bec8b97a3bf52f8a8fcf918384..9769db9818d2f4c468dd4128ac243d17ae7e15ba 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { struct ieee80211_local *local = sdata->local; - struct txq_info *txqi = to_txq_info(sdata->vif.txq); + struct txq_info *txqi; int len; + if (!sdata->vif.txq) + return 0; + + txqi = to_txq_info(sdata->vif.txq); + spin_lock_bh(&local->fq.lock); rcu_read_lock(); @@ -657,7 +662,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); DEBUGFS_ADD(hw_queues); - if (sdata->local->ops->wake_tx_queue) + if (sdata->local->ops->wake_tx_queue && + sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && + sdata->vif.type != NL80211_IFTYPE_NAN) DEBUGFS_ADD(aqm); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6708de10a3e5e27695afba56e8ea6a84e5eb02fb..0b0de3030e0dc1945da1fe28bb0d46509373645a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2123,6 +2123,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, const u8 *addr); void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); void ieee80211_tdls_chsw_work(struct work_struct *wk); +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason); +const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a8283952932f9a6fdb8c9b020357ff1cb19f35c9..afc998983780f1352b51d2094d6d577d0f09cebf 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2434,7 +2434,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, rcu_read_lock(); ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID); - if (WARN_ON_ONCE(ssid == NULL)) + if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN, + "invalid SSID element (len=%d)", ssid ? 
ssid[1] : -1)) ssid_len = 0; else ssid_len = ssid[1]; @@ -2755,7 +2756,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, #define case_WLAN(type) \ case WLAN_REASON_##type: return #type -static const char *ieee80211_get_reason_code_string(u16 reason_code) +const char *ieee80211_get_reason_code_string(u16 reason_code) { switch (reason_code) { case_WLAN(UNSPECIFIED); @@ -2820,6 +2821,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, if (len < 24 + 2) return; + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + if (ifmgd->associated && ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) { const u8 *bssid = ifmgd->associated->bssid; @@ -2869,8 +2875,14 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - sdata_info(sdata, "disassociated from %pM (Reason: %u)\n", - mgmt->sa, reason_code); + if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) { + ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code); + return; + } + + sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n", + mgmt->sa, reason_code, + ieee80211_get_reason_code_string(reason_code)); ieee80211_set_disassoc(sdata, 0, 0, false, NULL); @@ -4680,7 +4692,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); - if (!ssidie) { + if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) { rcu_read_unlock(); kfree(assoc_data); return -EINVAL; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 30fbabf4bcbc16aeb93e9c673d190cd3b615988b..593184d14b3ef69f86df63fb7feb7ed33185a310 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -128,7 +128,7 @@ #define CCK_GROUP \ [MINSTREL_CCK_GROUP] = { \ - .streams = 0, \ + .streams = 1, \ .flags = 0, \ .duration = { \ CCK_DURATION_LIST(false), \ diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index c64ae68ae4f84607dd4a56e7e7e4a34cbd88d077..863f92c087014491996e2d6c4702112dd10cb0b4 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -2001,3 +2001,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk) } rtnl_unlock(); } + +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata, + const u8 *peer, u16 reason) +{ + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(&sdata->vif, peer); + if (!sta || !sta->tdls) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n", + peer, reason, + ieee80211_get_reason_code_string(reason)); + + ieee80211_tdls_oper_request(&sdata->vif, peer, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE, + GFP_ATOMIC); +} diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index a748b0c2c981482eb853159f96ac69b563aebae8..fa5229fd370372f74d270f3e39ec7dc0d8dfc52f 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1942,8 +1942,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) } req_version->version = IPSET_PROTOCOL; - ret = copy_to_user(user, req_version, - sizeof(struct ip_set_req_version)); + if (copy_to_user(user, req_version, + sizeof(struct ip_set_req_version))) + ret = -EFAULT; goto done; } case IP_SET_OP_GET_BYNAME: { @@ -2000,7 +2001,8 @@ 
ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) } /* end of switch(op) */ copy: - ret = copy_to_user(user, data, copylen); + if (copy_to_user(user, data, copylen)) + ret = -EFAULT; done: vfree(data); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 8037b25ddb76a0797a9308890eca53cc245a0853..33125fc009cfde20a257bee4559c51388eb9de27 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -97,7 +97,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net, static void update_defense_level(struct netns_ipvs *ipvs) { struct sysinfo i; - static int old_secure_tcp = 0; int availmem; int nomem; int to_change = -1; @@ -178,35 +177,35 @@ static void update_defense_level(struct netns_ipvs *ipvs) spin_lock(&ipvs->securetcp_lock); switch (ipvs->sysctl_secure_tcp) { case 0: - if (old_secure_tcp >= 2) + if (ipvs->old_secure_tcp >= 2) to_change = 0; break; case 1: if (nomem) { - if (old_secure_tcp < 2) + if (ipvs->old_secure_tcp < 2) to_change = 1; ipvs->sysctl_secure_tcp = 2; } else { - if (old_secure_tcp >= 2) + if (ipvs->old_secure_tcp >= 2) to_change = 0; } break; case 2: if (nomem) { - if (old_secure_tcp < 2) + if (ipvs->old_secure_tcp < 2) to_change = 1; } else { - if (old_secure_tcp >= 2) + if (ipvs->old_secure_tcp >= 2) to_change = 0; ipvs->sysctl_secure_tcp = 1; } break; case 3: - if (old_secure_tcp < 2) + if (ipvs->old_secure_tcp < 2) to_change = 1; break; } - old_secure_tcp = ipvs->sysctl_secure_tcp; + ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp; if (to_change >= 0) ip_vs_protocol_timeout_change(ipvs, ipvs->sysctl_secure_tcp > 1); diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index e3ed200608788bc5e8dc96a14b8674d0fbaa7d4b..562b5452424927462db59158c7163e6e039945ce 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen, i++; } - pr_debug("Skipped up to `%c'!\n", skip); + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip); *numoff = i; *numlen = getnum(data + i, dlen - i, cmd, term, numoff); diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c index bbf0a3aa098c63ccd1577aed0a9b7b0da76af629..8c56ff2aa2fa5f71f3230f7b258bc22a673f532e 100644 --- a/net/netfilter/xt_quota2.c +++ b/net/netfilter/xt_quota2.c @@ -272,8 +272,8 @@ static void quota_mt2_destroy(const struct xt_mtdtor_param *par) } list_del(&e->list); - remove_proc_entry(e->name, proc_xt_quota); spin_unlock_bh(&counter_list_lock); + remove_proc_entry(e->name, proc_xt_quota); kfree(e); } diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index e31dea17473d923795ed6eab82917a926cdb12cd..dd59fde1dac83948de8b7bc1933e345075276d26 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -118,9 +118,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) llcp_sock->service_name = kmemdup(llcp_addr.service_name, llcp_sock->service_name_len, GFP_KERNEL); - + if (!llcp_sock->service_name) { + ret = -ENOMEM; + goto put_dev; + } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { + kfree(llcp_sock->service_name); + llcp_sock->service_name = NULL; ret = -EADDRINUSE; goto put_dev; } @@ -1011,10 +1016,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock, sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; 
sock->ops = &llcp_rawsock_ops; - else + } else { sock->ops = &llcp_sock_ops; + } sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern); if (sk == NULL) diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index dbf74afe82fb6ea08564971b646df81a1730ccb1..d3c8dd5dc817b414a633ad73e69fd0cfb13113bf 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -973,7 +973,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) int rc; u32 idx; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); @@ -1022,7 +1023,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg = NULL; u32 idx; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_FIRMWARE_NAME]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); @@ -1101,7 +1103,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) local = nfc_llcp_find_local(dev); if (!local) { - nfc_put_device(dev); rc = -ENODEV; goto exit; } @@ -1161,7 +1162,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) local = nfc_llcp_find_local(dev); if (!local) { - nfc_put_device(dev); rc = -ENODEV; goto exit; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 453f806afe6e159057aba84060d03fa30621b9a7..0ddcc63a02472144dba7db950d15cfbd7548d64c 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2218,7 +2218,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, }; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index e7da29021b38b7a81555d784473f88b7d6e3759d..c233924825801ecc28c5db60c4b0975b41b62683 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -44,7 +44,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev) } /* Called with rcu_read_lock_bh. 
*/ -static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { int len, err; @@ -63,7 +64,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) } else { netdev->stats.tx_errors++; } - return 0; + return NETDEV_TX_OK; } static int internal_dev_open(struct net_device *netdev) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 7b670a9a375e1c154a10619aa4cf5d712803db19..ee930b3011ccf81190afea89dc522b4031fae49b 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -126,6 +126,7 @@ static void __qrtr_node_release(struct kref *kref) list_del(&node->item); mutex_unlock(&qrtr_node_lock); + cancel_work_sync(&node->work); skb_queue_purge(&node->rx_queue); kfree(node); } diff --git a/net/rds/ib.c b/net/rds/ib.c index 0efb3d2b338d993ba1b47aba7f4aef3ecc965d55..a118686cc1ec341d75e4f2cef946e6861ef495a7 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -138,6 +138,9 @@ static void rds_ib_add_one(struct ib_device *device) atomic_set(&rds_ibdev->refcount, 1); INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free); + INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); + INIT_LIST_HEAD(&rds_ibdev->conn_list); + rds_ibdev->max_wrs = device->attrs.max_qp_wr; rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE); @@ -189,9 +192,6 @@ static void rds_ib_add_one(struct ib_device *device) device->name, rds_ibdev->use_fastreg ? "FRMR" : "FMR"); - INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); - INIT_LIST_HEAD(&rds_ibdev->conn_list); - down_write(&rds_ib_devices_lock); list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices); up_write(&rds_ib_devices_lock); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 67adb4ecded2c3fca60c8960c9dcf890d620f72f..5b8f8b382a2e7aedc1ecc6f5ed999352ed59f52c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -948,10 +948,15 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, static int tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 portid, int ovr) { - int ret = 0; + int loop, ret; LIST_HEAD(actions); - ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); + for (loop = 0; loop < 10; loop++) { + ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); + if (ret != -EAGAIN) + break; + } + if (ret) return ret; @@ -989,10 +994,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) */ if (n->nlmsg_flags & NLM_F_REPLACE) ovr = 1; -replay: ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr); - if (ret == -EAGAIN) - goto replay; break; case RTM_DELACTION: ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index cf9b2fe8eac6032f46a570c1482de601b41ac74f..9bebf4dc1c8e75dad65ef006794774f48c551bbf 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -54,13 +54,14 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (tb[TCA_PEDIT_PARMS] == NULL) return -EINVAL; parm = nla_data(tb[TCA_PEDIT_PARMS]); + if (!parm->nkeys) + return -EINVAL; + ksize = parm->nkeys * sizeof(struct tc_pedit_key); if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize) return -EINVAL; if (!tcf_hash_check(tn, parm->index, a, bind)) { - if (!parm->nkeys) - return -EINVAL; ret = tcf_hash_create(tn, parm->index, est, a, &act_pedit_ops, bind, false); if (ret) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index beb554aa8cfbb4acb5d89511b5e0030b4c9cf26e..7a434a5c43fe62085ef172034ed5535645e06de5 100644 --- 
a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1129,6 +1129,26 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = { [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) }, }; +static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt) +{ + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); + if (err < 0) + return err; + + if (tb[TCA_CBQ_WRROPT]) { + const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]); + + if (wrr->priority > TC_CBQ_MAXPRIO) + err = -EINVAL; + } + return err; +} + static int cbq_init(struct Qdisc *sch, struct nlattr *opt) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -1136,7 +1156,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) struct tc_ratespec *r; int err; - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); + err = cbq_opt_parse(tb, opt); if (err < 0) return err; @@ -1468,10 +1488,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t struct cbq_class *parent; struct qdisc_rate_table *rtab = NULL; - if (opt == NULL) - return -EINVAL; - - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); + err = cbq_opt_parse(tb, opt); if (err < 0) return err; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index b56d57984439afc1b2057d294623b02de154a680..551cf193649e9c9b53f684157f08a4e70d8f7857 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -346,6 +346,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) goto errout; err = -EINVAL; + if (!tb[TCA_DSMARK_INDICES]) + goto errout; indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); if (hweight32(indices) != 1) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index a5ea0e9b6be485c383251636bbe681379f975ed1..29b7465c9d8ab8d900608292bbafb7f988c77713 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -57,7 +57,7 @@ struct fq_codel_sched_data { struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ u32 *backlogs; /* backlog table [flows_cnt] */ u32 flows_cnt; /* number of flows */ - u32 perturbation; /* hash perturbation */ + siphash_key_t perturbation; /* hash perturbation */ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ u32 drop_batch_size; u32 memory_limit; @@ -75,7 +75,7 @@ struct fq_codel_sched_data { static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, struct sk_buff *skb) { - u32 hash = skb_get_hash_perturb(skb, q->perturbation); + u32 hash = skb_get_hash_perturb(skb, &q->perturbation); return reciprocal_scale(hash, q->flows_cnt); } @@ -482,7 +482,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) q->memory_limit = 32 << 20; /* 32 MBytes */ q->drop_batch_size = 64; q->quantum = psched_mtu(qdisc_dev(sch)); - q->perturbation = prandom_u32(); + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); INIT_LIST_HEAD(&q->new_flows); INIT_LIST_HEAD(&q->old_flows); codel_params_init(&q->cparams); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index b8031aa5327262e10b0777e8de9efd2ed591ae76..da3a7c923d0871352b7bd505e60591dea8ac2b2e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -704,7 +704,11 @@ static void qdisc_rcu_free(struct rcu_head *head) void qdisc_destroy(struct Qdisc *qdisc) { - const struct Qdisc_ops *ops = qdisc->ops; + const struct Qdisc_ops *ops; + + if (!qdisc) + return; + ops = qdisc->ops; if (qdisc->flags & TCQ_F_BUILTIN || !atomic_dec_and_test(&qdisc->refcnt)) diff --git a/net/sched/sch_hhf.c 
b/net/sched/sch_hhf.c index f4b2d69973c36e854a5c80fe405e8c9e35bd1d8a..1367fe94d6303fc21153c6293b193299c25e9f0f 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -4,11 +4,11 @@ * Copyright (C) 2013 Nandita Dukkipati */ -#include #include #include #include #include +#include #include #include @@ -125,7 +125,7 @@ struct wdrr_bucket { struct hhf_sched_data { struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; - u32 perturbation; /* hash perturbation */ + siphash_key_t perturbation; /* hash perturbation */ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ u32 drop_overlimit; /* number of times max qdisc packet * limit was hit @@ -263,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch) } /* Get hashed flow-id of the skb. */ - hash = skb_get_hash_perturb(skb, q->perturbation); + hash = skb_get_hash_perturb(skb, &q->perturbation); /* Check if this packet belongs to an already established HH flow. */ flow_pos = hash & HHF_BIT_MASK; @@ -543,7 +543,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]); non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight; - if (non_hh_quantum > INT_MAX) + if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX) return -EINVAL; sch_tree_lock(sch); @@ -593,7 +593,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = 1000; q->quantum = psched_mtu(qdisc_dev(sch)); - q->perturbation = prandom_u32(); + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); INIT_LIST_HEAD(&q->new_buckets); INIT_LIST_HEAD(&q->old_buckets); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e9812e21dbc9b99b573cc8339201e7caec0e232b..95002e56fa48b6f28157f97e01cdc286f3de2e50 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -475,7 +475,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, * skb will be queued. */ if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { - struct Qdisc *rootq = qdisc_root(sch); + struct Qdisc *rootq = qdisc_root_bh(sch); u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ q->duplicate = 0; @@ -711,7 +711,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) int i; size_t s; - if (n > NETEM_DIST_MAX) + if (!n || n > NETEM_DIST_MAX) return -EINVAL; s = sizeof(struct disttable) + n * sizeof(s16); diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 20a350bd1b1dc8283c7e352ce0a953ce0eea2544..bc176bd48c0265052f25b2f09bfaaaae02782370 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -48,7 +48,7 @@ struct sfb_bucket { * (Section 4.4 of SFB reference : moving hash functions) */ struct sfb_bins { - u32 perturbation; /* jhash perturbation */ + siphash_key_t perturbation; /* siphash key */ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; }; @@ -219,7 +219,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) { - q->bins[slot].perturbation = prandom_u32(); + get_random_bytes(&q->bins[slot].perturbation, + sizeof(q->bins[slot].perturbation)); } static void sfb_swap_slot(struct sfb_sched_data *q) @@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* If using external classifiers, get result and record it. 
*/ if (!sfb_classify(skb, fl, &ret, &salt)) goto other_drop; - sfbhash = jhash_1word(salt, q->bins[slot].perturbation); + sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); } else { - sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation); + sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); } @@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Inelastic flow */ if (q->double_buffering) { sfbhash = skb_get_hash_perturb(skb, - q->bins[slot].perturbation); + &q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d8c2b6baaad2d3fb9af0923e9440713980bb6a9c..a8d82cb7f0738a7d43a4961e43f93a1ddaac8a25 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -120,7 +120,7 @@ struct sfq_sched_data { u8 headdrop; u8 maxdepth; /* limit of packets per flow */ - u32 perturbation; + siphash_key_t perturbation; u8 cur_depth; /* depth of longest slot */ u8 flags; unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ @@ -158,7 +158,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index static unsigned int sfq_hash(const struct sfq_sched_data *q, const struct sk_buff *skb) { - return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1); + return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); } static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, @@ -607,9 +607,11 @@ static void sfq_perturbation(unsigned long arg) struct Qdisc *sch = (struct Qdisc *)arg; struct sfq_sched_data *q = qdisc_priv(sch); spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + siphash_key_t nkey; + get_random_bytes(&nkey, sizeof(nkey)); spin_lock(root_lock); - q->perturbation = prandom_u32(); + q->perturbation = nkey; if (!q->filter_list && q->tail) sfq_rehash(sch); spin_unlock(root_lock); @@ -681,7 +683,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) del_timer(&q->perturb_timer); if (q->perturb_period) { mod_timer(&q->perturb_timer, jiffies + q->perturb_period); - q->perturbation = prandom_u32(); + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); } sch_tree_unlock(sch); kfree(p); @@ -737,7 +739,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) q->quantum = psched_mtu(qdisc_dev(sch)); q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q->perturb_period = 0; - q->perturbation = prandom_u32(); + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); if (opt) { int err = sfq_change(sch, opt); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 31f461f955ec0276624ad38fdbaed8c2fd43958a..824ebbffea3377e7edfad7c5cad60775b735ec3e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -973,7 +973,7 @@ static const struct proto_ops inet6_seqpacket_ops = { .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, - .connect = inet_dgram_connect, + .connect = sctp_inet_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = sctp_getname, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d6af93a24aa03c4581b68ce9f3ea08e84a068c08..9cb06ca4eabae595b53864bbca64da80f7e341ce 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1014,7 +1014,7 @@ static const struct proto_ops inet_seqpacket_ops = { .owner = THIS_MODULE, .release = inet_release, /* Needs to be wrapped... 
*/ .bind = inet_bind, - .connect = inet_dgram_connect, + .connect = sctp_inet_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* Semantics are different. */ @@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net) return status; } -static void __net_init sctp_ctrlsock_exit(struct net *net) +static void __net_exit sctp_ctrlsock_exit(struct net *net) { /* Free the control endpoint. */ inet_ctl_sock_destroy(net->sctp.ctl_sock); diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b1ead1776e81d5ca07c14e1680ee2b83ecfd4b5a..8b4cf78987e479c45b8d2ea37cf23c183a13b515 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -509,7 +509,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, if (net->sctp.pf_enable && (transport->state == SCTP_ACTIVE) && (transport->error_count < transport->pathmaxrxt) && - (transport->error_count > asoc->pf_retrans)) { + (transport->error_count > transport->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_PF, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 93e60068800ba32ac576894547fdb604436b0c06..21ec92011585ad820f6aca60bb9c3993bd879d38 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1074,7 +1074,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, */ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, - int addrs_size, + int addrs_size, int flags, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); @@ -1092,7 +1092,6 @@ static int __sctp_connect(struct sock *sk, union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; - unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; @@ -1240,13 +1239,7 @@ static int __sctp_connect(struct sock *sk, sp->pf->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; - /* in-kernel sockets don't generally have a file allocated to them - * if all they do is call sock_create_kern(). - */ - if (sk->sk_socket->file) - f_flags = sk->sk_socket->file->f_flags; - - timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); + timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); if (assoc_id) *assoc_id = asoc->assoc_id; @@ -1341,7 +1334,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, { struct sockaddr *kaddrs; gfp_t gfp = GFP_KERNEL; - int err = 0; + int err = 0, flags = 0; pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", __func__, sk, addrs, addrs_size); @@ -1361,11 +1354,18 @@ static int __sctp_setsockopt_connectx(struct sock *sk, return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { - err = -EFAULT; - } else { - err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); + kfree(kaddrs); + return -EFAULT; } + /* in-kernel sockets don't generally have a file allocated to them + * if all they do is call sock_create_kern(). + */ + if (sk->sk_socket->file) + flags = sk->sk_socket->file->f_flags; + + err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); + kfree(kaddrs); return err; @@ -3979,31 +3979,36 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, - int addr_len) + int addr_len, int flags) { - int err = 0; struct sctp_af *af; + int err = -EINVAL; lock_sock(sk); - pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. 
*/ af = sctp_get_af_specific(addr->sa_family); - if (!af || addr_len < af->sockaddr_len) { - err = -EINVAL; - } else { - /* Pass correct addr len to common routine (so it knows there - * is only one address being passed. - */ - err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); - } + if (af && addr_len >= af->sockaddr_len) + err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL); release_sock(sk); return err; } +int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags) +{ + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; + + if (uaddr->sa_family == AF_UNSPEC) + return -EOPNOTSUPP; + + return sctp_connect(sock->sk, uaddr, addr_len, flags); +} + /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { @@ -7729,7 +7734,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; - newinet->inet_id = asoc->next_tsn ^ jiffies; + newinet->inet_id = prandom_u32(); newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; @@ -7896,7 +7901,6 @@ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, - .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, @@ -7911,7 +7915,7 @@ struct proto sctp_prot = { .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, - .get_port = sctp_get_port, + .no_autobind = true, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, @@ -7935,7 +7939,6 @@ struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, - .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, @@ -7950,7 +7953,7 @@ struct proto sctpv6_prot = { .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, - .get_port = sctp_get_port, + .no_autobind = true, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 1d74d653e6c058cfbf3669cf9258e14b6002d6b0..ad0dcb69395d7d7936e140d3423b864d72c4ee1b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -63,6 +63,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_AUTH diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 600eacce653ae26e824839c2a03e6bb7476739ba..0ef65822fdd346ee86d291b9588988faf7f1241d 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -99,64 +99,78 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); } -static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) -{ - struct list_head *q = &queue->tasks[queue->priority]; - struct rpc_task *task; - - if (!list_empty(q)) { - task = list_first_entry(q, struct rpc_task, u.tk_wait.list); - if (task->tk_owner == queue->owner) - list_move_tail(&task->u.tk_wait.list, q); - } -} - static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) { if (queue->priority != priority) { - /* Fairness: rotate the list when changing priority */ - rpc_rotate_queue_owner(queue); queue->priority = priority; + queue->nr = 1U << priority; } } -static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) 
-{ - queue->owner = pid; - queue->nr = RPC_BATCH_COUNT; -} - static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) { rpc_set_waitqueue_priority(queue, queue->maxpriority); - rpc_set_waitqueue_owner(queue, 0); } /* - * Add new request to a priority queue. + * Add a request to a queue list */ -static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, - struct rpc_task *task, - unsigned char queue_priority) +static void +__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) { - struct list_head *q; struct rpc_task *t; - INIT_LIST_HEAD(&task->u.tk_wait.links); - if (unlikely(queue_priority > queue->maxpriority)) - queue_priority = queue->maxpriority; - if (queue_priority > queue->priority) - rpc_set_waitqueue_priority(queue, queue_priority); - q = &queue->tasks[queue_priority]; list_for_each_entry(t, q, u.tk_wait.list) { if (t->tk_owner == task->tk_owner) { - list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); + list_add_tail(&task->u.tk_wait.links, + &t->u.tk_wait.links); + /* Cache the queue head in task->u.tk_wait.list */ + task->u.tk_wait.list.next = q; + task->u.tk_wait.list.prev = NULL; return; } } + INIT_LIST_HEAD(&task->u.tk_wait.links); list_add_tail(&task->u.tk_wait.list, q); } +/* + * Remove request from a queue list + */ +static void +__rpc_list_dequeue_task(struct rpc_task *task) +{ + struct list_head *q; + struct rpc_task *t; + + if (task->u.tk_wait.list.prev == NULL) { + list_del(&task->u.tk_wait.links); + return; + } + if (!list_empty(&task->u.tk_wait.links)) { + t = list_first_entry(&task->u.tk_wait.links, + struct rpc_task, + u.tk_wait.links); + /* Assume __rpc_list_enqueue_task() cached the queue head */ + q = t->u.tk_wait.list.next; + list_add_tail(&t->u.tk_wait.list, q); + list_del(&task->u.tk_wait.links); + } + list_del(&task->u.tk_wait.list); +} + +/* + * Add new request to a priority queue. + */ +static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, + struct rpc_task *task, + unsigned char queue_priority) +{ + if (unlikely(queue_priority > queue->maxpriority)) + queue_priority = queue->maxpriority; + __rpc_list_enqueue_task(&queue->tasks[queue_priority], task); +} + /* * Add new request to wait queue. * @@ -194,13 +208,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, */ static void __rpc_remove_wait_queue_priority(struct rpc_task *task) { - struct rpc_task *t; - - if (!list_empty(&task->u.tk_wait.links)) { - t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); - list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); - list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); - } + __rpc_list_dequeue_task(task); } /* @@ -212,7 +220,8 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas __rpc_disable_timer(queue, task); if (RPC_IS_PRIORITY(queue)) __rpc_remove_wait_queue_priority(task); - list_del(&task->u.tk_wait.list); + else + list_del(&task->u.tk_wait.list); queue->qlen--; dprintk("RPC: %5u removed from queue %p \"%s\"\n", task->tk_pid, queue, rpc_qname(queue)); @@ -481,17 +490,9 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q * Service a batch of tasks from a single owner. */ q = &queue->tasks[queue->priority]; - if (!list_empty(q)) { - task = list_entry(q->next, struct rpc_task, u.tk_wait.list); - if (queue->owner == task->tk_owner) { - if (--queue->nr) - goto out; - list_move_tail(&task->u.tk_wait.list, q); - } - /* - * Check if we need to switch queues. 
- */ - goto new_owner; + if (!list_empty(q) && --queue->nr) { + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); + goto out; } /* @@ -503,7 +504,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q else q = q - 1; if (!list_empty(q)) { - task = list_entry(q->next, struct rpc_task, u.tk_wait.list); + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); goto new_queue; } } while (q != &queue->tasks[queue->priority]); @@ -513,8 +514,6 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q new_queue: rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); -new_owner: - rpc_set_waitqueue_owner(queue, task->tk_owner); out: return task; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 280fb317870844153f1138f6f90aba6e05f1a022..f3f05148922a197167ff11d38a102aa31e13429f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -124,7 +124,7 @@ static struct ctl_table xs_tunables_table[] = { .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &xprt_min_resvport_limit, - .extra2 = &xprt_max_resvport + .extra2 = &xprt_max_resvport_limit }, { .procname = "max_resvport", @@ -132,7 +132,7 @@ static struct ctl_table xs_tunables_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &xprt_min_resvport, + .extra1 = &xprt_min_resvport_limit, .extra2 = &xprt_max_resvport_limit }, { @@ -1737,11 +1737,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); } -static unsigned short xs_get_random_port(void) +static int xs_get_random_port(void) { - unsigned short range = xprt_max_resvport - xprt_min_resvport + 1; - unsigned short rand = (unsigned short) prandom_u32() % range; - return rand + xprt_min_resvport; + unsigned short min = xprt_min_resvport, max = xprt_max_resvport; + unsigned short range; + unsigned short rand; + + if (max < min) + return -EADDRINUSE; + range = max - min + 1; + rand = (unsigned short) prandom_u32() % range; + return rand + min; } /** @@ -1798,9 +1804,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock) transport->srcport = xs_sock_getport(sock); } -static unsigned short xs_get_srcport(struct sock_xprt *transport) +static int xs_get_srcport(struct sock_xprt *transport) { - unsigned short port = transport->srcport; + int port = transport->srcport; if (port == 0 && transport->xprt.resvport) port = xs_get_random_port(); @@ -1821,7 +1827,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) { struct sockaddr_storage myaddr; int err, nloop = 0; - unsigned short port = xs_get_srcport(transport); + int port = xs_get_srcport(transport); unsigned short last; /* @@ -1839,8 +1845,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock) * transport->xprt.resvport == 1) xs_get_srcport above will * ensure that port is non-zero and we will bind as needed. 
*/ - if (port == 0) - return 0; + if (port <= 0) + return port; memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); do { @@ -3223,12 +3229,8 @@ static int param_set_uint_minmax(const char *val, static int param_set_portnr(const char *val, const struct kernel_param *kp) { - if (kp->arg == &xprt_min_resvport) - return param_set_uint_minmax(val, kp, - RPC_MIN_RESVPORT, - xprt_max_resvport); return param_set_uint_minmax(val, kp, - xprt_min_resvport, + RPC_MIN_RESVPORT, RPC_MAX_RESVPORT); } diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 23f8899e0f8c3d7bda50bc57801bcc63ceab448c..7ebcaff8c1c4febc5093374bee24edecf4287ece 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -224,7 +224,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr) publ->key); } - kfree_rcu(p, rcu); + if (p) + kfree_rcu(p, rcu); } /** diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f0c2197cb6ad26c2591737adac468fc20ecba084..e7012a509035223688acbecc9b71f8212e54ac9f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -224,6 +224,8 @@ static inline void unix_release_addr(struct unix_address *addr) static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) { + *hashp = 0; + if (len <= sizeof(short) || len > sizeof(*sunaddr)) return -EINVAL; if (!sunaddr || sunaddr->sun_family != AF_UNIX) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 71ef07afe368f933136998240aa3fe17509e7d6d..3e9b1363271dc73ad121a30534aafcf7ab059bde 100755 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -213,6 +213,36 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) return __cfg80211_rdev_from_attrs(netns, info->attrs); } +static int validate_beacon_head(const struct nlattr *attr) +{ + const u8 *data = nla_data(attr); + unsigned int len = nla_len(attr); + const struct element *elem; + const struct ieee80211_mgmt *mgmt = (void *)data; + unsigned int fixedlen = offsetof(struct ieee80211_mgmt, + u.beacon.variable); + + if (len < fixedlen) + goto err; + + if (ieee80211_hdrlen(mgmt->frame_control) != + offsetof(struct ieee80211_mgmt, u.beacon)) + goto err; + + data += fixedlen; + len -= fixedlen; + + for_each_element(elem, data, len) { + /* nothing */ + } + + if (for_each_element_completed(elem, data, len)) + return 0; + +err: + return -EINVAL; +} + /* policy for the attributes */ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@ -265,7 +295,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_MESH_ID_LEN }, - [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, + [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_BINARY, + .len = ETH_ALEN }, [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, @@ -2095,6 +2126,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); + memset(chandef, 0, sizeof(*chandef)); + chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq); chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = control_freq; @@ -2564,7 +2597,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag if (rdev->ops->get_channel) { int ret; - struct cfg80211_chan_def chandef; + struct 
cfg80211_chan_def chandef = {}; ret = rdev_get_channel(rdev, wdev, &chandef); if (ret == 0) { @@ -3051,7 +3084,7 @@ static void get_key_callback(void *c, struct key_params *params) params->cipher))) goto nla_put_failure; - if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx)) + if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx)) goto nla_put_failure; nla_nest_end(cookie->msg, key); @@ -3704,6 +3737,11 @@ static int nl80211_parse_beacon(struct nlattr *attrs[], memset(bcn, 0, sizeof(*bcn)); if (attrs[NL80211_ATTR_BEACON_HEAD]) { + int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD]); + + if (ret) + return ret; + bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]); bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]); if (!bcn->head_len) @@ -5376,6 +5414,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->del_mpath) return -EOPNOTSUPP; + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) + return -EOPNOTSUPP; + return rdev_del_mpath(rdev, dev, dst); } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 73252d7ddf4a6861a1eb10614839acde73ba58fe..c5dd4fef04337921dced8959adda9cf81fe4ac1f 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1569,7 +1569,7 @@ static void reg_call_notifier(struct wiphy *wiphy, static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) { - struct cfg80211_chan_def chandef; + struct cfg80211_chan_def chandef = {}; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); enum nl80211_iftype iftype; @@ -2621,8 +2621,54 @@ static void restore_regulatory_settings(bool reset_user) schedule_work(&reg_work); } +static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + wdev_lock(wdev); + if (!(wdev->wiphy->regulatory_flags & flag)) { + wdev_unlock(wdev); + return false; + } + wdev_unlock(wdev); + } + } + + return true; +} + void regulatory_hint_disconnect(void) { + /* Restore of regulatory settings is not required when wiphy(s) + * ignore IE from connected access point but clearance of beacon hints + * is required when wiphy(s) supports beacon hints. + */ + if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { + struct reg_beacon *reg_beacon, *btmp; + + if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) + return; + + spin_lock_bh(&reg_pending_beacons_lock); + list_for_each_entry_safe(reg_beacon, btmp, + &reg_pending_beacons, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + spin_unlock_bh(&reg_pending_beacons_lock); + + list_for_each_entry_safe(reg_beacon, btmp, + &reg_beacon_list, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + + return; + } + pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false); } diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 7ea2eba74c0e45af386d1f2fde6322ead4e312b8..4de2c7c1553114493443131af8282e4461e03a85 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -407,6 +407,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len, const u8 *match, int match_len, int match_offset) { + const struct element *elem; + /* match_offset can't be smaller than 2, unless match_len is * zero, in which case match_offset must be zero as well.
*/ @@ -414,14 +416,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len, (!match_len && match_offset))) return NULL; - while (len >= 2 && len >= ies[1] + 2) { - if ((ies[0] == eid) && - (ies[1] + 2 >= match_offset + match_len) && - !memcmp(ies + match_offset, match, match_len)) - return ies; - - len -= ies[1] + 2; - ies += ies[1] + 2; + for_each_element_id(elem, eid, ies, len) { + if (elem->datalen >= match_offset - 2 + match_len && + !memcmp(elem->data + match_offset - 2, match, match_len)) + return (void *)elem; } return NULL; diff --git a/net/wireless/util.c b/net/wireless/util.c index c934189198156f4eed40bd34fc8d67f4f6addab0..bc77b8754ff0d05e2109de0bff9c8eae893767f1 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -929,6 +929,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, } cfg80211_process_rdev_events(rdev); + cfg80211_mlme_purge_registrations(dev->ieee80211_ptr); } err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index a220156cf2175a459727fcf67e1291036dca21ae..b3baa34714e3ebeb1d1ff90692a5e3e30e9130c5 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -799,7 +799,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); - struct cfg80211_chan_def chandef; + struct cfg80211_chan_def chandef = {}; int ret; switch (wdev->iftype) { diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 995163830a61f582c11b9234b370bb2c3766f7f0..9e7846b7d953470aacc1989d7200ec706614d801 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -224,6 +224,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; + int ret = 0; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) @@ -241,7 +242,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev, if (ie) { data->flags = 1; data->length = ie[1]; - memcpy(ssid, ie + 2, data->length); + if (data->length > IW_ESSID_MAX_SIZE) + ret = -EINVAL; + else + memcpy(ssid, ie + 2, data->length); } rcu_read_unlock(); } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { @@ -251,7 +255,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev, } wdev_unlock(wdev); - return 0; + return ret; } int cfg80211_mgd_wext_siwap(struct net_device *dev, diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index bb9988914a5637db435ac1026a108a5f3a62cb9a..32234481ad7dbee128cc9c31531b1843baf672b1 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c @@ -370,7 +370,7 @@ static uint32_t amt_host_if_call(struct amt_host_if *acmd, unsigned int expected_sz) { uint32_t in_buf_sz; - uint32_t out_buf_sz; + ssize_t out_buf_sz; ssize_t written; uint32_t status; struct amt_host_if_resp_header *msg_hdr; diff --git a/scripts/namespace.pl b/scripts/namespace.pl index 9f3c9d47a4a5d89c4e95de825f67d84b0258f0df..4dddd4c01b6256fbe8d4ce65b966edac55dfce76 100755 --- a/scripts/namespace.pl +++ b/scripts/namespace.pl @@ -65,13 +65,14 @@ require 5; # at least perl 5 use strict; use File::Find; +use File::Spec; my $nm = ($ENV{'NM'} || "nm") . " -p"; my $objdump = ($ENV{'OBJDUMP'} || "objdump") . 
" -s -j .comment"; -my $srctree = ""; -my $objtree = ""; -$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'})); -$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'})); +my $srctree = File::Spec->curdir(); +my $objtree = File::Spec->curdir(); +$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'})); +$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'})); if ($#ARGV != -1) { print STDERR "usage: $0 takes no parameters\n"; @@ -231,9 +232,9 @@ sub do_nm } ($source = $basename) =~ s/\.o$//; if (-e "$source.c" || -e "$source.S") { - $source = "$objtree$File::Find::dir/$source"; + $source = File::Spec->catfile($objtree, $File::Find::dir, $source) } else { - $source = "$srctree$File::Find::dir/$source"; + $source = File::Spec->catfile($srctree, $File::Find::dir, $source) } if (! -e "$source.c" && ! -e "$source.S") { # No obvious source, exclude the object if it is conglomerate diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 966dd3924ea9cb34997828f10b68523615fdbf57..aa28c3f29809314bfa58dd2d695a22e5c39a8442 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -72,8 +72,16 @@ scm_version() printf -- '-svn%s' "`git svn find-rev $head`" fi - # Check for uncommitted changes - if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then + # Check for uncommitted changes. + # First, with git-status, but --no-optional-locks is only + # supported in git >= 2.14, so fall back to git-diff-index if + # it fails. Note that git-diff-index does not refresh the + # index, so it may give misleading results. See + # git-update-index(1), git-diff-index(1), and git-status(1). + if { + git --no-optional-locks status -uno --porcelain 2>/dev/null || + git diff-index --name-only HEAD + } | grep -qvE '^(.. 
)?scripts/package'; then printf '%s' -dirty fi diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 20e66291ca99a3c2b4b8e68e4873d3ca9d1b9fb4..5155c343406e0fd45cc6a5fbb2c57492f222e32d 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -298,8 +298,11 @@ static int ima_calc_file_hash_atfm(struct file *file, rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); rc = integrity_kernel_read(file, offset, rbuf[active], rbuf_len); - if (rc != rbuf_len) + if (rc != rbuf_len) { + if (rc >= 0) + rc = -EINVAL; goto out3; + } if (rbuf[1] && offset) { /* Using two buffers, and it is not the first diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index f60baeb338e5f13adbda545be23a870415a4a8a4..b47445022d5ce422cc576646079ce278942abc37 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key, { struct request_key_auth *rka = key->payload.data[0]; + if (!rka) + return; + seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_positive(key)) @@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key, size_t datalen; long ret; + if (!rka) + return -EKEYREVOKED; + datalen = rka->callout_len; ret = datalen; diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index 23e5808a0970b69bfed64a92d608b05537bbc074..e5d5c7fb2dace5a7bf7c4689c2e7954c38885e6c 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -474,7 +474,7 @@ char *smk_parse_smack(const char *string, int len) if (i == 0 || i >= SMK_LONGLABEL) return ERR_PTR(-EINVAL); - smack = kzalloc(i + 1, GFP_KERNEL); + smack = kzalloc(i + 1, GFP_NOFS); if (smack == NULL) return ERR_PTR(-ENOMEM); @@ -545,7 +545,7 @@ struct smack_known *smk_import_entry(const char *string, int len) if (skp != NULL) goto freeout; - skp = kzalloc(sizeof(*skp), GFP_KERNEL); + skp = kzalloc(sizeof(*skp), GFP_NOFS); if (skp == NULL) { skp = ERR_PTR(-ENOMEM); goto freeout; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index d683c41f0f6e51b7093831763a79f41be9ab81a6..e85774746ad5a2e8c9e32dff44162eaff4affdd0 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -268,7 +268,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip, if (!(ip->i_opflags & IOP_XATTR)) return ERR_PTR(-EOPNOTSUPP); - buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL); + buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS); if (buffer == NULL) return ERR_PTR(-ENOMEM); @@ -949,7 +949,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm) if (rc != 0) return rc; - } else if (bprm->unsafe) + } + if (bprm->unsafe & ~LSM_UNSAFE_PTRACE) return -EPERM; bsp->smk_task = isp->smk_task; @@ -4037,6 +4038,8 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) skp = smack_ipv6host_label(&sadd); if (skp == NULL) skp = smack_net_ambient; + if (skb == NULL) + break; #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = family; diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c index e2f27022b363c482ba2614a2579f87f2b19192e3..d7dddb75e7bb8dd8b1cc6724c1824f76ec83dda6 100644 --- a/sound/core/hrtimer.c +++ b/sound/core/hrtimer.c @@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void) timer->hw = hrtimer_hw; timer->hw.resolution = resolution; timer->hw.ticks = NANO_SEC / resolution; + timer->max_instances = 100; 
/* lower the limit */ err = snd_timer_global_register(timer); if (err < 0) { diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index a84a1d3d23e56e15c20710c221dfc52a6a743437..c6888d76ca5e98734d1fe9bfa10b3e85d2de328c 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames) while (plugin->next) { if (plugin->dst_frames) frames = plugin->dst_frames(plugin, frames); - if (snd_BUG_ON(frames <= 0)) + if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0)) return -ENXIO; plugin = plugin->next; err = snd_pcm_plugin_alloc(plugin, frames); @@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames) while (plugin->prev) { if (plugin->src_frames) frames = plugin->src_frames(plugin, frames); - if (snd_BUG_ON(frames <= 0)) + if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0)) return -ENXIO; plugin = plugin->prev; err = snd_pcm_plugin_alloc(plugin, frames); diff --git a/sound/core/seq/seq_system.c b/sound/core/seq/seq_system.c index 8ce1d0b40dce1f0821f9cf9c144fed1ed27e413d..ce1f1e4727ab1e6d96434359d168cf0d81f4462f 100644 --- a/sound/core/seq/seq_system.c +++ b/sound/core/seq/seq_system.c @@ -123,6 +123,7 @@ int __init snd_seq_system_client_init(void) { struct snd_seq_port_callback pcallbacks; struct snd_seq_port_info *port; + int err; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) @@ -144,7 +145,10 @@ int __init snd_seq_system_client_init(void) port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT; port->addr.client = sysclient; port->addr.port = SNDRV_SEQ_PORT_SYSTEM_TIMER; - snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port); + err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, + port); + if (err < 0) + goto error_port; /* register announcement port */ strcpy(port->name, "Announce"); @@ -154,16 +158,24 @@ int __init snd_seq_system_client_init(void) port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT; port->addr.client = sysclient; port->addr.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE; - snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port); + err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, + port); + if (err < 0) + goto error_port; announce_port = port->addr.port; kfree(port); return 0; + + error_port: + snd_seq_system_client_done(); + kfree(port); + return err; } /* unregister our internal client */ -void __exit snd_seq_system_client_done(void) +void snd_seq_system_client_done(void) { int oldsysclient = sysclient; diff --git a/sound/core/timer.c b/sound/core/timer.c index d3b62608394440442dabb0601f5733626dbdea9b..4eb085931d3f42dd2db6f544dc34d33faaf6f2cf 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -179,7 +179,7 @@ static void snd_timer_request(struct snd_timer_id *tid) * * call this with register_mutex down. 
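 * Returns 0 on success, or -EBUSY when the matched master's timer already
 * holds max_instances instances.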
*/ -static void snd_timer_check_slave(struct snd_timer_instance *slave) +static int snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; @@ -189,16 +189,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave) list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { + if (master->timer->num_instances >= + master->timer->max_instances) + return -EBUSY; list_move_tail(&slave->open_list, &master->slave_list_head); + master->timer->num_instances++; spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); - return; + return 0; } } } + return 0; } /* @@ -207,7 +212,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave) * * call this with register_mutex down. */ -static void snd_timer_check_master(struct snd_timer_instance *master) +static int snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; @@ -215,7 +220,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master) list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { + if (master->timer->num_instances >= + master->timer->max_instances) + return -EBUSY; list_move_tail(&slave->open_list, &master->slave_list_head); + master->timer->num_instances++; spin_lock_irq(&slave_active_lock); spin_lock(&master->timer->lock); slave->master = master; @@ -227,8 +236,12 @@ static void snd_timer_check_master(struct snd_timer_instance *master) spin_unlock_irq(&slave_active_lock); } } + return 0; } +static int snd_timer_close_locked(struct snd_timer_instance *timeri, + struct device **card_devp_to_put); + /* * open a timer instance * when opening a master, the slave id must be here given. 
@@ -239,33 +252,37 @@ int snd_timer_open(struct snd_timer_instance **ti, { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; + struct device *card_dev_to_put = NULL; + int err; + mutex_lock(®ister_mutex); if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); - return -EINVAL; + err = -EINVAL; + goto unlock; } - mutex_lock(®ister_mutex); timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { - mutex_unlock(®ister_mutex); - return -ENOMEM; + err = -ENOMEM; + goto unlock; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); - snd_timer_check_slave(timeri); - mutex_unlock(®ister_mutex); - *ti = timeri; - return 0; + err = snd_timer_check_slave(timeri); + if (err < 0) { + snd_timer_close_locked(timeri, &card_dev_to_put); + timeri = NULL; + } + goto unlock; } /* open a master instance */ - mutex_lock(®ister_mutex); timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { @@ -276,21 +293,26 @@ int snd_timer_open(struct snd_timer_instance **ti, } #endif if (!timer) { - mutex_unlock(®ister_mutex); - return -ENODEV; + err = -ENODEV; + goto unlock; } if (!list_empty(&timer->open_list_head)) { - timeri = list_entry(timer->open_list_head.next, + struct snd_timer_instance *t = + list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); - if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { - mutex_unlock(®ister_mutex); - return -EBUSY; + if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { + err = -EBUSY; + goto unlock; } } + if (timer->num_instances >= timer->max_instances) { + err = -EBUSY; + goto unlock; + } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { - mutex_unlock(®ister_mutex); - return -ENOMEM; + err = -ENOMEM; + goto unlock; } /* take a card refcount for safe disconnection */ if (timer->card) @@ -299,38 +321,47 @@ int snd_timer_open(struct snd_timer_instance **ti, timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) { - int err = timer->hw.open(timer); + err = timer->hw.open(timer); if (err) { kfree(timeri->owner); kfree(timeri); + timeri = NULL; if (timer->card) - put_device(&timer->card->card_dev); + card_dev_to_put = &timer->card->card_dev; module_put(timer->module); - mutex_unlock(®ister_mutex); - return err; + goto unlock; } } list_add_tail(&timeri->open_list, &timer->open_list_head); - snd_timer_check_master(timeri); + timer->num_instances++; + err = snd_timer_check_master(timeri); + if (err < 0) { + snd_timer_close_locked(timeri, &card_dev_to_put); + timeri = NULL; + } + + unlock: mutex_unlock(®ister_mutex); + /* put_device() is called after unlock for avoiding deadlock */ + if (card_dev_to_put) + put_device(card_dev_to_put); *ti = timeri; - return 0; + return err; } +EXPORT_SYMBOL(snd_timer_open); /* * close a timer instance + * call this with register_mutex down. 
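+ * The card device whose refcount must be dropped is handed back through
+ * card_devp_to_put; the caller does put_device() on it only after
+ * register_mutex has been released, to avoid a deadlock.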
*/ -int snd_timer_close(struct snd_timer_instance *timeri) +static int snd_timer_close_locked(struct snd_timer_instance *timeri, + struct device **card_devp_to_put) { struct snd_timer *timer = NULL; struct snd_timer_instance *slave, *tmp; - if (snd_BUG_ON(!timeri)) - return -ENXIO; - - mutex_lock(®ister_mutex); list_del(&timeri->open_list); /* force to stop the timer */ @@ -338,6 +369,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) timer = timeri->timer; if (timer) { + timer->num_instances--; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { @@ -353,6 +385,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { list_move_tail(&slave->open_list, &snd_timer_slave_list); + timer->num_instances--; slave->master = NULL; slave->timer = NULL; list_del_init(&slave->ack_list); @@ -376,14 +409,34 @@ int snd_timer_close(struct snd_timer_instance *timeri) timer->hw.close(timer); /* release a card refcount for safe disconnection */ if (timer->card) - put_device(&timer->card->card_dev); + *card_devp_to_put = &timer->card->card_dev; module_put(timer->module); } - mutex_unlock(®ister_mutex); return 0; } +/* + * close a timer instance + */ +int snd_timer_close(struct snd_timer_instance *timeri) +{ + struct device *card_dev_to_put = NULL; + int err; + + if (snd_BUG_ON(!timeri)) + return -ENXIO; + + mutex_lock(®ister_mutex); + err = snd_timer_close_locked(timeri, &card_dev_to_put); + mutex_unlock(®ister_mutex); + /* put_device() is called after unlock for avoiding deadlock */ + if (card_dev_to_put) + put_device(card_dev_to_put); + return err; +} +EXPORT_SYMBOL(snd_timer_close); + unsigned long snd_timer_resolution(struct snd_timer_instance *timeri) { struct snd_timer * timer; @@ -397,6 +450,7 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri) } return 0; } +EXPORT_SYMBOL(snd_timer_resolution); static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { @@ -588,6 +642,7 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) else return snd_timer_start1(timeri, true, ticks); } +EXPORT_SYMBOL(snd_timer_start); /* * stop the timer instance. @@ -601,6 +656,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri) else return snd_timer_stop1(timeri, true); } +EXPORT_SYMBOL(snd_timer_stop); /* * start again.. the tick is kept. @@ -616,6 +672,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri) else return snd_timer_start1(timeri, false, 0); } +EXPORT_SYMBOL(snd_timer_continue); /* * pause.. 
remember the ticks left @@ -627,6 +684,7 @@ int snd_timer_pause(struct snd_timer_instance * timeri) else return snd_timer_stop1(timeri, false); } +EXPORT_SYMBOL(snd_timer_pause); /* * reschedule the timer @@ -808,6 +866,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) if (use_tasklet) tasklet_schedule(&timer->task_queue); } +EXPORT_SYMBOL(snd_timer_interrupt); /* @@ -846,6 +905,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); + timer->max_instances = 1000; /* default limit per timer */ if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); @@ -858,6 +918,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, *rtimer = timer; return 0; } +EXPORT_SYMBOL(snd_timer_new); static int snd_timer_free(struct snd_timer *timer) { @@ -977,6 +1038,7 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam } spin_unlock_irqrestore(&timer->lock, flags); } +EXPORT_SYMBOL(snd_timer_notify); /* * exported functions for global timers @@ -992,11 +1054,13 @@ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } +EXPORT_SYMBOL(snd_timer_global_new); int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } +EXPORT_SYMBOL(snd_timer_global_free); int snd_timer_global_register(struct snd_timer *timer) { @@ -1006,6 +1070,7 @@ int snd_timer_global_register(struct snd_timer *timer) dev.device_data = timer; return snd_timer_dev_register(&dev); } +EXPORT_SYMBOL(snd_timer_global_register); /* * System timer @@ -2125,17 +2190,3 @@ static void __exit alsa_timer_exit(void) module_init(alsa_timer_init) module_exit(alsa_timer_exit) - -EXPORT_SYMBOL(snd_timer_open); -EXPORT_SYMBOL(snd_timer_close); -EXPORT_SYMBOL(snd_timer_resolution); -EXPORT_SYMBOL(snd_timer_start); -EXPORT_SYMBOL(snd_timer_stop); -EXPORT_SYMBOL(snd_timer_continue); -EXPORT_SYMBOL(snd_timer_pause); -EXPORT_SYMBOL(snd_timer_new); -EXPORT_SYMBOL(snd_timer_notify); -EXPORT_SYMBOL(snd_timer_global_new); -EXPORT_SYMBOL(snd_timer_global_free); -EXPORT_SYMBOL(snd_timer_global_register); -EXPORT_SYMBOL(snd_timer_interrupt); diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c index f1109005794944cd759b90ede92a6fc601ab251e..d0a8736613a1dcaf66b490f37d871158239cae8e 100644 --- a/sound/firewire/bebob/bebob_focusrite.c +++ b/sound/firewire/bebob/bebob_focusrite.c @@ -28,6 +28,8 @@ #define SAFFIRE_CLOCK_SOURCE_SPDIF 1 /* clock sources as returned from register of Saffire Pro 10 and 26 */ +#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff +#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0 #define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */ #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2 @@ -190,6 +192,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) map = saffirepro_clk_maps[1]; /* In a case that this driver cannot handle the value of register. 
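 * Only the low byte selects the clock source (see
 * SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK above); the detection bits in the
 * next byte are masked off before the range check.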
*/ + value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK; if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) { err = -EIO; goto end; diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 4d3034a68bdfbd587163566484eea57804260eb7..be2c056eb62df1b824de2af5995d5b6271908027 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -253,8 +253,7 @@ int snd_bebob_stream_get_clock_src(struct snd_bebob *bebob, return err; } -static unsigned int -map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s) +static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s) { unsigned int sec, sections, ch, channels; unsigned int pcm, midi, location; diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c index 48d6dca471c6bc5713cd7a6ee7bc466c008a1997..6c8daf5b391ff22d0ec1245758a96e576f77b2fd 100644 --- a/sound/firewire/isight.c +++ b/sound/firewire/isight.c @@ -639,7 +639,7 @@ static int isight_probe(struct fw_unit *unit, if (!isight->audio_base) { dev_err(&unit->device, "audio unit base not found\n"); err = -ENXIO; - goto err_unit; + goto error; } fw_iso_resources_init(&isight->resources, unit); @@ -668,12 +668,12 @@ static int isight_probe(struct fw_unit *unit, dev_set_drvdata(&unit->device, isight); return 0; - -err_unit: - fw_unit_put(isight->unit); - mutex_destroy(&isight->mutex); error: snd_card_free(card); + + mutex_destroy(&isight->mutex); + fw_unit_put(isight->unit); + return err; } diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c index 79db1b651f5c59e2b2af1265eea68e13f14433fe..984749ee8145cbf9bb1d7dae6695824da507cc70 100644 --- a/sound/firewire/tascam/tascam-pcm.c +++ b/sound/firewire/tascam/tascam-pcm.c @@ -81,6 +81,9 @@ static int pcm_open(struct snd_pcm_substream *substream) goto err_locked; err = snd_tscm_stream_get_clock(tscm, &clock); + if (err < 0) + goto err_locked; + if (clock != SND_TSCM_CLOCK_INTERNAL || amdtp_stream_pcm_running(&tscm->rx_stream) || amdtp_stream_pcm_running(&tscm->tx_stream)) { diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c index f1657a4e0621ef4999349477ce6e71fdbcd7f411..a1308f12a65b099c8b484326a0630087eae79372 100644 --- a/sound/firewire/tascam/tascam-stream.c +++ b/sound/firewire/tascam/tascam-stream.c @@ -9,20 +9,37 @@ #include #include "tascam.h" +#define CLOCK_STATUS_MASK 0xffff0000 +#define CLOCK_CONFIG_MASK 0x0000ffff + #define CALLBACK_TIMEOUT 500 static int get_clock(struct snd_tscm *tscm, u32 *data) { + int trial = 0; __be32 reg; int err; - err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST, - TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS, - ®, sizeof(reg), 0); - if (err >= 0) + while (trial++ < 5) { + err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST, + TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS, + ®, sizeof(reg), 0); + if (err < 0) + return err; + *data = be32_to_cpu(reg); + if (*data & CLOCK_STATUS_MASK) + break; - return err; + // In intermediate state after changing clock status. + msleep(50); + } + + // Still in the intermediate state. 
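+ // after five reads (with roughly 250 ms of msleep(50) waits in between),
+ // so give up and let the caller retry via -EAGAIN.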
+ if (trial >= 5) + return -EAGAIN; + + return 0; } static int set_clock(struct snd_tscm *tscm, unsigned int rate, @@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate, err = get_clock(tscm, &data); if (err < 0) return err; - data &= 0x0000ffff; + data &= CLOCK_CONFIG_MASK; if (rate > 0) { data &= 0x000000ff; @@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate, int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate) { - u32 data = 0x0; - unsigned int trials = 0; + u32 data; int err; - while (data == 0x0 || trials++ < 5) { - err = get_clock(tscm, &data); - if (err < 0) - return err; + err = get_clock(tscm, &data); + if (err < 0) + return err; - data = (data & 0xff000000) >> 24; - } + data = (data & 0xff000000) >> 24; /* Check base rate. */ if ((data & 0x0f) == 0x01) diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c index 7e21621e492a4b2c83e9f296d9f472a581eb97b2..7fd1b40008838919f07067a81df02e355e1819f0 100644 --- a/sound/i2c/cs8427.c +++ b/sound/i2c/cs8427.c @@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device, struct cs8427 *chip = device->private_data; char *hw_data = udata ? chip->playback.hw_udata : chip->playback.hw_status; - char data[32]; + unsigned char data[32]; int err, idx; if (!memcmp(hw_data, ndata, count)) diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c index bf377dc192aa7f66be479449535acce1fb2f196f..d33e02c3171293c6c09e1ce0063d820ffbcbf52e 100644 --- a/sound/i2c/other/ak4xxx-adda.c +++ b/sound/i2c/other/ak4xxx-adda.c @@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak) return err; memset(&knew, 0, sizeof(knew)); - knew.name = ak->adc_info[mixer_ch].selector_name; - if (!knew.name) { + if (!ak->adc_info || + !ak->adc_info[mixer_ch].selector_name) { knew.name = "Capture Channel"; knew.index = mixer_ch + ak->idx_offset * 2; - } + } else + knew.name = ak->adc_info[mixer_ch].selector_name; knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.info = ak4xxx_capture_source_info; diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 85dbe2420248db8b0f013b06086d18217c4e5fa4..c5e82329348b3d62101a5f433751986845f2e504 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -866,10 +866,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr, */ if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) { hbus->response_reset = 1; + dev_err(chip->card->dev, + "No response from codec, resetting bus: last cmd=0x%08x\n", + bus->last_cmd[addr]); return -EAGAIN; /* give a chance to retry */ } - dev_err(chip->card->dev, + dev_WARN(chip->card->dev, "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n", bus->last_cmd[addr]); chip->single_cmd = 1; diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index e0fb8c6d1bc274e4ff5f6a8bf55afa9852bf9026..7d65c6df9aa8a3ecb5b0c255e43faec3e17db97b 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -370,6 +370,7 @@ static const struct hda_fixup ad1986a_fixups[] = { static const struct snd_pci_quirk ad1986a_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC), + SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC), SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD), SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD), SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", 
AD1986A_FIXUP_3STACK), diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 2809999612260767dc6da6398923f548e426945e..475b2c6c43d61cdad5b07d3f65ae959088b6f0c6 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -4440,7 +4440,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) /* Delay enabling the HP amp, to let the mic-detection * state machine run. */ - cancel_delayed_work_sync(&spec->unsol_hp_work); + cancel_delayed_work(&spec->unsol_hp_work); schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); tbl = snd_hda_jack_tbl_get(codec, cb->nid); if (tbl) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 31322aeebcff00d312699c6e59ccd6d735003295..a64612db1f155f1c0787cbb79f42628f06079e38 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -353,6 +353,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0700: case 0x10ec0701: case 0x10ec0703: + case 0x10ec0711: alc_update_coef_idx(codec, 0x10, 1<<15, 0); break; case 0x10ec0662: @@ -969,6 +970,9 @@ static const struct snd_pci_quirk beep_white_list[] = { SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1), SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), + /* blacklist -- no beep available */ + SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0), + SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0), {} }; @@ -6421,6 +6425,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0700: case 0x10ec0701: case 0x10ec0703: + case 0x10ec0711: spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ @@ -7461,6 +7466,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269), HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269), HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269), HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662), HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 0abab7926dca3fe835decb2d28030b214a2adee0..d1a6d20ace0da42d18cc6c786b8fbb6e276f4948 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -77,6 +77,7 @@ enum { STAC_DELL_M6_BOTH, STAC_DELL_EQ, STAC_ALIENWARE_M17X, + STAC_ELO_VUPOINT_15MX, STAC_92HD89XX_HP_FRONT_JACK, STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, STAC_92HD73XX_ASUS_MOBO, @@ -1875,6 +1876,18 @@ static void stac92hd73xx_fixup_no_jd(struct hda_codec *codec, codec->no_jack_detect = 1; } + +static void stac92hd73xx_disable_automute(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct sigmatel_spec *spec = codec->spec; + + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + spec->gen.suppress_auto_mute = 1; +} + static const struct hda_fixup stac92hd73xx_fixups[] = { [STAC_92HD73XX_REF] = { .type = HDA_FIXUP_FUNC, @@ -1900,6 +1913,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = stac92hd73xx_fixup_alienware_m17x, }, + [STAC_ELO_VUPOINT_15MX] = { + .type = HDA_FIXUP_FUNC, + .v.func = stac92hd73xx_disable_automute, + }, [STAC_92HD73XX_INTEL] = { .type = HDA_FIXUP_PINS, .v.pins = intel_dg45id_pin_configs, @@ 
-1938,6 +1955,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = { { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, { .id = STAC_DELL_EQ, .name = "dell-eq" }, { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, + { .id = STAC_ELO_VUPOINT_15MX, .name = "elo-vupoint-15mx" }, { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" }, {} }; @@ -1987,6 +2005,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { "Alienware M17x", STAC_ALIENWARE_M17X), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, "Alienware M17x R3", STAC_DELL_EQ), + SND_PCI_QUIRK(0x1059, 0x1011, + "ELO VuPoint 15MX", STAC_ELO_VUPOINT_15MX), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927, "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c index 1bc98c867133d8e302c6fd2d74502c27f2355ed3..2286dfd72ff7e814d0dc39a201973630d64150da 100644 --- a/sound/pci/intel8x0m.c +++ b/sound/pci/intel8x0m.c @@ -1171,16 +1171,6 @@ static int snd_intel8x0m_create(struct snd_card *card, } port_inited: - if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, - KBUILD_MODNAME, chip)) { - dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq); - snd_intel8x0m_free(chip); - return -EBUSY; - } - chip->irq = pci->irq; - pci_set_master(pci); - synchronize_irq(chip->irq); - /* initialize offsets */ chip->bdbars_count = 2; tbl = intel_regs; @@ -1224,11 +1214,21 @@ static int snd_intel8x0m_create(struct snd_card *card, chip->int_sta_reg = ICH_REG_GLOB_STA; chip->int_sta_mask = int_sta_masks; + pci_set_master(pci); + if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) { snd_intel8x0m_free(chip); return err; } + if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED, + KBUILD_MODNAME, chip)) { + dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq); + snd_intel8x0m_free(chip); + return -EBUSY; + } + chip->irq = pci->irq; + if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_intel8x0m_free(chip); return err; diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index c602c4960924c1553c6e2b54450e4dbe5fbc5f92..88355d1719a30fa29bf22f2377218f2f7c35b90a 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -1267,6 +1267,12 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac, if (ret) return ret; + /* Filter out 44.1, 88.2 and 176.4Khz */ + rates &= ~(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 | + SNDRV_PCM_RATE_176400); + if (!rates) + return -EINVAL; + sprintf(dai_name, "intel-hdmi-hifi%d", i+1); hdmi_dais[i].name = devm_kstrdup(&hdac->dev, dai_name, GFP_KERNEL); diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 3dba5550a6659152df5c8feec4e00daa2bbd33e7..39810b713d5f2b70275042bae68797254e561a06 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -35,6 +35,13 @@ #define SGTL5000_DAP_REG_OFFSET 0x0100 #define SGTL5000_MAX_REG_OFFSET 0x013A +/* Delay for the VAG ramp up */ +#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */ +/* Delay for the VAG ramp down */ +#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */ + +#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE) + /* default value of sgtl5000 registers */ static const struct reg_default sgtl5000_reg_defaults[] = { { SGTL5000_CHIP_DIG_POWER, 0x0000 }, @@ -99,6 +106,13 @@ enum sgtl5000_micbias_resistor { SGTL5000_MICBIAS_8K = 8, }; +enum { + HP_POWER_EVENT, + DAC_POWER_EVENT, + ADC_POWER_EVENT, + LAST_POWER_EVENT = ADC_POWER_EVENT +}; + /* sgtl5000 
private structure in codec */ struct sgtl5000_priv { int sysclk; /* sysclk rate */ @@ -111,8 +125,117 @@ struct sgtl5000_priv { int revision; u8 micbias_resistor; u8 micbias_voltage; + u16 mute_state[LAST_POWER_EVENT + 1]; }; +static inline int hp_sel_input(struct snd_soc_component *component) +{ + unsigned int ana_reg = 0; + + snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &ana_reg); + + return (ana_reg & SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT; +} + +static inline u16 mute_output(struct snd_soc_component *component, + u16 mute_mask) +{ + unsigned int mute_reg = 0; + + snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &mute_reg); + + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL, + mute_mask, mute_mask); + return mute_reg; +} + +static inline void restore_output(struct snd_soc_component *component, + u16 mute_mask, u16 mute_reg) +{ + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL, + mute_mask, mute_reg); +} + +static void vag_power_on(struct snd_soc_component *component, u32 source) +{ + unsigned int ana_reg = 0; + + snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_reg); + + if (ana_reg & SGTL5000_VAG_POWERUP) + return; + + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER, + SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); + + /* When VAG powering on to get local loop from Line-In, the sleep + * is required to avoid loud pop. + */ + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN && + source == HP_POWER_EVENT) + msleep(SGTL5000_VAG_POWERUP_DELAY); +} + +static int vag_power_consumers(struct snd_soc_component *component, + u16 ana_pwr_reg, u32 source) +{ + int consumers = 0; + + /* count dac/adc consumers unconditional */ + if (ana_pwr_reg & SGTL5000_DAC_POWERUP) + consumers++; + if (ana_pwr_reg & SGTL5000_ADC_POWERUP) + consumers++; + + /* + * If the event comes from HP and Line-In is selected, + * current action is 'DAC to be powered down'. + * As HP_POWERUP is not set when HP muxed to line-in, + * we need to keep VAG power ON. + */ + if (source == HP_POWER_EVENT) { + if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN) + consumers++; + } else { + if (ana_pwr_reg & SGTL5000_HP_POWERUP) + consumers++; + } + + return consumers; +} + +static void vag_power_off(struct snd_soc_component *component, u32 source) +{ + unsigned int ana_pwr = SGTL5000_VAG_POWERUP; + + snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_pwr); + + if (!(ana_pwr & SGTL5000_VAG_POWERUP)) + return; + + /* + * This function calls when any of VAG power consumers is disappearing. + * Thus, if there is more than one consumer at the moment, as minimum + * one consumer will definitely stay after the end of the current + * event. + * Don't clear VAG_POWERUP if 2 or more consumers of VAG present: + * - LINE_IN (for HP events) / HP (for DAC/ADC events) + * - DAC + * - ADC + * (the current consumer is disappearing right now) + */ + if (vag_power_consumers(component, ana_pwr, source) >= 2) + return; + + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER, + SGTL5000_VAG_POWERUP, 0); + /* In power down case, we need wait 400-1000 ms + * when VAG fully ramped down. + * As longer we wait, as smaller pop we've got. 
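+ * SGTL5000_VAG_POWERDOWN_DELAY is defined above as 500 ms, which falls
+ * within that window.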
+ */ + msleep(SGTL5000_VAG_POWERDOWN_DELAY); +} + /* * mic_bias power on/off share the same register bits with * output impedance of mic bias, when power on mic bias, we @@ -144,36 +267,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w, return 0; } -/* - * As manual described, ADC/DAC only works when VAG powerup, - * So enabled VAG before ADC/DAC up. - * In power down case, we need wait 400ms when vag fully ramped down. - */ -static int power_vag_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) +static int vag_and_mute_control(struct snd_soc_component *component, + int event, int event_source) { - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP; + static const u16 mute_mask[] = { + /* + * Mask for HP_POWER_EVENT. + * Muxing Headphones have to be wrapped with mute/unmute + * headphones only. + */ + SGTL5000_HP_MUTE, + /* + * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT. + * Muxing DAC or ADC block have to be wrapped with mute/unmute + * both headphones and line-out. + */ + SGTL5000_OUTPUTS_MUTE, + SGTL5000_OUTPUTS_MUTE + }; + + struct sgtl5000_priv *sgtl5000 = + snd_soc_component_get_drvdata(component); switch (event) { + case SND_SOC_DAPM_PRE_PMU: + sgtl5000->mute_state[event_source] = + mute_output(component, mute_mask[event_source]); + break; case SND_SOC_DAPM_POST_PMU: - snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, - SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); - msleep(400); + vag_power_on(component, event_source); + restore_output(component, mute_mask[event_source], + sgtl5000->mute_state[event_source]); break; - case SND_SOC_DAPM_PRE_PMD: - /* - * Don't clear VAG_POWERUP, when both DAC and ADC are - * operational to prevent inadvertently starving the - * other one of them. - */ - if ((snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER) & - mask) != mask) { - snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, - SGTL5000_VAG_POWERUP, 0); - msleep(400); - } + sgtl5000->mute_state[event_source] = + mute_output(component, mute_mask[event_source]); + vag_power_off(component, event_source); + break; + case SND_SOC_DAPM_POST_PMD: + restore_output(component, mute_mask[event_source], + sgtl5000->mute_state[event_source]); break; default: break; @@ -182,6 +315,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, return 0; } +/* + * Mute Headphone when power it up/down. + * Control VAG power on HP power path. + */ +static int headphone_pga_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = + snd_soc_dapm_to_component(w->dapm); + + return vag_and_mute_control(component, event, HP_POWER_EVENT); +} + +/* As manual describes, ADC/DAC powering up/down requires + * to mute outputs to avoid pops. + * Control VAG power on ADC/DAC power path. 
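+ * Both the ADC and DAC helpers below simply forward to
+ * vag_and_mute_control() with the corresponding event source.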
+ */ +static int adc_updown_depop(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = + snd_soc_dapm_to_component(w->dapm); + + return vag_and_mute_control(component, event, ADC_POWER_EVENT); +} + +static int dac_updown_depop(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = + snd_soc_dapm_to_component(w->dapm); + + return vag_and_mute_control(component, event, DAC_POWER_EVENT); +} + /* input sources for ADC */ static const char *adc_mux_text[] = { "MIC_IN", "LINE_IN" @@ -217,7 +385,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = { mic_bias_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), - SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0), + SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0, + headphone_pga_event, + SND_SOC_DAPM_PRE_POST_PMU | + SND_SOC_DAPM_PRE_POST_PMD), SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0), SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux), @@ -233,11 +404,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = { 0, SGTL5000_CHIP_DIG_POWER, 1, 0), - SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0), - SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0), - - SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event), - SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event), + SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0, + adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU | + SND_SOC_DAPM_PRE_POST_PMD), + SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0, + dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU | + SND_SOC_DAPM_PRE_POST_PMD), }; /* routes for sgtl5000 */ @@ -987,12 +1159,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) SGTL5000_INT_OSC_EN); /* Enable VDDC charge pump */ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; - } else if (vddio >= 3100 && vdda >= 3100) { + } else { ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; - /* VDDC use VDDIO rail */ - lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; - lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << - SGTL5000_VDDC_MAN_ASSN_SHIFT; + /* + * if vddio == vdda the source of charge pump should be + * assigned manually to VDDIO + */ + if (vddio == vdda) { + lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; + lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << + SGTL5000_VDDC_MAN_ASSN_SHIFT; + } } snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl); @@ -1040,7 +1217,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) * Searching for a suitable index solving this formula: * idx = 40 * log10(vag_val / lo_cagcntrl) + 15 */ - vol_quot = (vag * 100) / lo_vag; + vol_quot = lo_vag ? 
(vag * 100) / lo_vag : 0; lo_vol = 0; for (i = 0; i < ARRAY_SIZE(vol_quot_table); i++) { if (vol_quot >= vol_quot_table[i]) diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index c03c9da076c2d43fe254808a3b65965cc4d77625..28eb55bc46634d3fe3610796a9c81a174c5c0a16 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -948,8 +948,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len) } if (in) { - if (in & WMFW_CTL_FLAG_READABLE) - out |= rd; + out |= rd; if (in & WMFW_CTL_FLAG_WRITEABLE) out |= wr; if (in & WMFW_CTL_FLAG_VOLATILE) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 1c03490e1182b639802a61ca73b3763673eef171..7cd2a5aed15a3f75dacdd916bbf49f337d902fb8 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1431,6 +1431,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) struct fsl_ssi_private *ssi_private; int ret = 0; struct device_node *np = pdev->dev.of_node; + struct device_node *root; const struct of_device_id *of_id; const char *p, *sprop; const uint32_t *iprop; @@ -1620,7 +1621,9 @@ static int fsl_ssi_probe(struct platform_device *pdev) * device tree. We also pass the address of the CPU DAI driver * structure. */ - sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL); + root = of_find_node_by_path("/"); + sprop = of_get_property(root, "compatible", NULL); + of_node_put(root); /* Sometimes the compatible name has a "fsl," prefix, so we strip it. */ p = strrchr(sprop, ','); if (p) diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c index 6c672ac79cce7b3b12e1a7cb65fdb8e8c4456f15..9d24cb719f6961d4092a40c24f8235de9c234716 100644 --- a/sound/soc/intel/common/sst-ipc.c +++ b/sound/soc/intel/common/sst-ipc.c @@ -211,6 +211,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc, if (ipc->ops.reply_msg_match != NULL) header = ipc->ops.reply_msg_match(header, &mask); + else + mask = (u64)-1; if (list_empty(&ipc->rx_list)) { dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n", diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index dcf03691ebc81e33927656483aa73e1b9647d529..d8cf37a0f696d1b19c7f4760cd4dea9ecff8e25c 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -211,7 +211,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl) struct hdac_bus *bus = ebus_to_hbus(&skl->ebus); struct device *dev = bus->dev; - dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n", + dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n", nhlt->header.oem_id, nhlt->header.oem_table_id, nhlt->header.oem_revision); diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 08bfee447a36595159ef6abe64223c20c245940a..94b6f9c7dd6bb7546ff00418c91e203a81dc8806 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -649,7 +649,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev) ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); if (ret) { dev_err(&pdev->dev, "Could not register PCM\n"); - return ret; + goto err_suspend; } return 0; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 91b444db575e5d1191a6c589e5b3bc23a537842d..5346b3cafc6704e0095acf4b04b3cb7b382345cc 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -629,6 +629,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } /* set 
format */ + rdai->bit_clk_inv = 0; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: rdai->sys_delay = 0; diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 6cef3977507ae15c11ff074c5e33c0b275cef9c8..67d22b4baeb0544288fc81410f31fa1779716dd5 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -312,6 +312,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd) if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i])) pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE; + + if (rtd->pcm->streams[i].pcm->name[0] == '\0') { + strncpy(rtd->pcm->streams[i].pcm->name, + rtd->pcm->streams[i].pcm->id, + sizeof(rtd->pcm->streams[i].pcm->name)); + } } return 0; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index c4c11ae8baae68ea2906bc0111a8b65371f1b086..b396b475ec63312ca1b06ae2e3f9942e80de3463 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1710,7 +1710,7 @@ static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime, u64 formats) { runtime->hw.rate_min = stream->rate_min; - runtime->hw.rate_max = stream->rate_max; + runtime->hw.rate_max = min_not_zero(stream->rate_max, UINT_MAX); runtime->hw.channels_min = stream->channels_min; runtime->hw.channels_max = stream->channels_max; if (runtime->hw.formats) diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c index 1e76869dd4880eddbcb6a939dc186a1bddd2bdab..863e04809a6b8b3af51735a3e71453a4d3129861 100644 --- a/sound/soc/tegra/tegra_sgtl5000.c +++ b/sound/soc/tegra/tegra_sgtl5000.c @@ -152,14 +152,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Property 'nvidia,i2s-controller' missing/invalid\n"); ret = -EINVAL; - goto err; + goto err_put_codec_of_node; } tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node; ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev); if (ret) - goto err; + goto err_put_cpu_of_node; ret = snd_soc_register_card(card); if (ret) { @@ -172,6 +172,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev) err_fini_utils: tegra_asoc_utils_fini(&machine->util_data); +err_put_cpu_of_node: + of_node_put(tegra_sgtl5000_dai.cpu_of_node); + tegra_sgtl5000_dai.cpu_of_node = NULL; + tegra_sgtl5000_dai.platform_of_node = NULL; +err_put_codec_of_node: + of_node_put(tegra_sgtl5000_dai.codec_of_node); + tegra_sgtl5000_dai.codec_of_node = NULL; err: return ret; } @@ -186,6 +193,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev) tegra_asoc_utils_fini(&machine->util_data); + of_node_put(tegra_sgtl5000_dai.cpu_of_node); + tegra_sgtl5000_dai.cpu_of_node = NULL; + tegra_sgtl5000_dai.platform_of_node = NULL; + of_node_put(tegra_sgtl5000_dai.codec_of_node); + tegra_sgtl5000_dai.codec_of_node = NULL; + return ret; } diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 6bd424b8e073d24018a35a94438d9ba121cb4503..feb60f4762c0f5633bc091e7353c15935242c389 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -403,6 +403,9 @@ static void snd_complete_urb(struct urb *urb) } prepare_outbound_urb(ep, ctx); + /* can be stopped during prepare callback */ + if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags))) + goto exit_clear; } else { retire_inbound_urb(ep, ctx); /* can be stopped during retire callback */ diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 63ed4793885e9d5998bb10f059047cb0afe3ddac..503278656508ad6f13f99a70da795d496841d65d 100755 --- 
a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1137,7 +1137,8 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval, if (cval->min + cval->res < cval->max) { int last_valid_res = cval->res; int saved, test, check; - get_cur_mix_raw(cval, minchn, &saved); + if (get_cur_mix_raw(cval, minchn, &saved) < 0) + goto no_res_check; for (;;) { test = saved; if (test < cval->max) @@ -1157,6 +1158,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval, snd_usb_set_cur_mix_value(cval, minchn, 0, saved); } +no_res_check: cval->initialized = 1; } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index f61bf8421ed2412bc56e2e1eccfb17348cd0b16e..adee1c0e2bdfa7a30efcebd6da58bcab8d619136 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -472,6 +472,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, } ep = get_endpoint(alts, 1)->bEndpointAddress; if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && + get_endpoint(alts, 0)->bSynchAddress != 0 && ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) { dev_err(&dev->dev, diff --git a/tools/gpio/Build b/tools/gpio/Build index 620c1937d9573e960828c52464c61ca4b78ede4f..4141f35837db33d97b195be533f58954146ac9a6 100644 --- a/tools/gpio/Build +++ b/tools/gpio/Build @@ -1,3 +1,4 @@ +gpio-utils-y += gpio-utils.o lsgpio-y += lsgpio.o gpio-utils.o gpio-hammer-y += gpio-hammer.o gpio-utils.o gpio-event-mon-y += gpio-event-mon.o gpio-utils.o diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 250a891e6ef0ecd082da136102b3c5a7a6f33a7a..359dd5d11c811deb7b569fccbd7d60245b41c719 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile @@ -32,11 +32,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h prepare: $(OUTPUT)include/linux/gpio.h +GPIO_UTILS_IN := $(output)gpio-utils-in.o +$(GPIO_UTILS_IN): prepare FORCE + $(Q)$(MAKE) $(build)=gpio-utils + # # lsgpio # LSGPIO_IN := $(OUTPUT)lsgpio-in.o -$(LSGPIO_IN): prepare FORCE +$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=lsgpio $(OUTPUT)lsgpio: $(LSGPIO_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ @@ -45,7 +49,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN) # gpio-hammer # GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o -$(GPIO_HAMMER_IN): prepare FORCE +$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=gpio-hammer $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ @@ -54,7 +58,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) # gpio-event-mon # GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o -$(GPIO_EVENT_MON_IN): prepare FORCE +$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=gpio-event-mon $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 7851df1490e0a81f6a17776bbdb464f564a99894..cc3315da6dc397d6ef672089ce6a34577e1957b1 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -54,15 +54,15 @@ set_plugin_dir := 1 # Set plugin_dir to preffered global plugin location # If we install under $HOME directory we go under -# $(HOME)/.traceevent/plugins +# $(HOME)/.local/lib/traceevent/plugins # # We dont set PLUGIN_DIR in case we install under $HOME # directory, because by default the code looks under: -# $(HOME)/.traceevent/plugins by default. 
+# $(HOME)/.local/lib/traceevent/plugins by default.
 #
 ifeq ($(plugin_dir),)
 ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
 set_plugin_dir := 0
 else
 override plugin_dir = $(libdir)/traceevent/plugins
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index def61125ac36d3d8e419af64aa9cfedfdbd84c0e..62f4cacf253ab6714e72b4a5283522dd1d8bb67f 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -267,10 +267,10 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
 		errno = ENOMEM;
 		return -1;
 	}
+	pevent->cmdlines = cmdlines;
 
 	cmdlines[pevent->cmdline_count].comm = strdup(comm);
 	if (!cmdlines[pevent->cmdline_count].comm) {
-		free(cmdlines);
 		errno = ENOMEM;
 		return -1;
 	}
@@ -281,7 +281,6 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
 	pevent->cmdline_count++;
 
 	qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-	pevent->cmdlines = cmdlines;
 
 	return 0;
 }
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index a16756ae352679a433d4321f79bb083bd8294633..5fe7889606a23720adb846b13b9837b257079b65 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -30,7 +30,7 @@
 #include "event-parse.h"
 #include "event-utils.h"
 
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
 
 static struct registered_plugin_options {
 	struct registered_plugin_options	*next;
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 884d4f1ed0c1e222f315808ffe6b78cd12e26162..84756140a8a8ed5967f9e7830acf34e5b349c656 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -32,7 +32,7 @@ INCLUDES := -I$(srctree)/tools/include \
 	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
 	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+CFLAGS   := -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
 # Allow old libelf to be used:
diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
index a3d2c62fd805772c0c9d56fe57b80a9d058cf3ff..0a3ad5dd1e8bfb8f6d6f7192030af0ad90841f9f 100644
--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
@@ -68,7 +68,7 @@ BEGIN {
 
 	lprefix1_expr = "\\((66|!F3)\\)"
 	lprefix2_expr = "\\(F3\\)"
-	lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+	lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
 	lprefix_expr = "\\((66|F2|F3)\\)"
 	max_lprefix = 4
 
@@ -256,7 +256,7 @@ function convert_operands(count,opnd,       i,j,imm,mod)
 	return add_flags(imm, mod)
 }
 
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
 	if (NR == 1)
 		next
 	# get index
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index d426dcb18ce9a0d9756c274844bc84af9bcc5b0a..496a4ca1166712c0c0097d569593a7505a201c3d 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -674,6 +674,7 @@ static char *compact_gfp_flags(char *gfp_flags)
 			new = realloc(new_flags, len + strlen(cpt) + 2);
 			if (new == NULL) {
 				free(new_flags);
+				free(orig_flags);
 				return NULL;
 			}
 
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 43d5f35e90747e86c7eb864436fbf1c42b5ee47e..5cb58f3afa3559bc4c68926fcc0854a18bd927c8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2564,8 +2564,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
 			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
 				run_idx + 1);
 
+		if (run_idx != 0)
+			perf_evlist__reset_prev_raw_counts(evsel_list);
+
 		status = run_perf_stat(argc, argv);
-		if (forever && status != -1) {
+		if (forever && status != -1 && !interval) {
 			print_counters(NULL, argc, argv);
 			perf_stat__reset_stats();
 		}
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 016d12af68773dfac48cf04969ac4269f2b07a9c..0619054bd7a0dec50b4cfd8c82f2232f18bf8f9c 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -311,12 +311,12 @@ static struct fixed {
 	const char *name;
 	const char *event;
 } fixed[] = {
-	{ "inst_retired.any", "event=0xc0" },
-	{ "inst_retired.any_p", "event=0xc0" },
-	{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
-	{ "cpu_clk_unhalted.thread", "event=0x3c" },
-	{ "cpu_clk_unhalted.core", "event=0x3c" },
-	{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+	{ "inst_retired.any", "event=0xc0,period=2000003" },
+	{ "inst_retired.any_p", "event=0xc0,period=2000003" },
+	{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
+	{ "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
+	{ "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
+	{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
 	{ NULL, NULL},
 };
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 258b19b251a82b0d32d41abb9e30036e24e2ab03..b3d947b98a7c23408220fbc9fafbbea1d9aa5782 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -949,7 +949,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
 
 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
 	if (sysfs__read_str(file, &cache->map, &len)) {
-		free(cache->map);
+		free(cache->size);
 		free(cache->type);
 		return -1;
 	}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 82833ceba339a6bb52b8a60a4b405d7105e79e32..32f991d284979e2b9c2aa2cc8be3d6997c3f6b47 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1485,7 +1485,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
 	return 0;
 }
 
-static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
 {
 	struct hists *hists = a->hists;
 	struct perf_hpp_fmt *fmt;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 95f0884aae0286078681ecf19546546f9d6fb2a9..7e2e8aa95467984e4a59578f0710e923bd26ecd6 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -369,7 +369,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 	size_t size;
 	u16 idr_size;
 	const char *sym;
-	uint32_t count;
+	uint64_t count;
 	int ret, csize;
 	pid_t pid, tid;
 	struct {
@@ -391,7 +391,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 			jd->dir,
 			pid,
 			count);
@@ -493,7 +493,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
 		return -1;
 
 	filename = event->mmap2.filename;
-	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+	size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
 			jd->dir,
 			pid,
 			jr->move.code_index);
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 621f6527b79091eecce238953a4497d14237cc7a..4887dd5eb80fcaf449dc07ec9fef5f573a01505a 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -220,14 +220,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
 	const char *prefix_dir = "";
 	const char *suffix_dir = "";
 
+	/* _UTSNAME_LENGTH is 65 */
+	char release[128];
+
 	char *autoconf_path;
 	int err;
 
 	if (!test_dir) {
-		/* _UTSNAME_LENGTH is 65 */
-		char release[128];
-
 		err = fetch_kernel_version(NULL, release, sizeof(release));
 		if (err)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index c662fef95d1444f243195e90370a7d5e97d06856..df6892596dc27cba2206f290d94ecf28791caba0 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,4 +1,5 @@
 #include "symbol.h"
+#include <assert.h>
 #include 
 #include 
 #include 
@@ -716,6 +717,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
 		}
 
 		after->start = map->end;
+		after->pgoff += map->end - pos->start;
+		assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
 		__map_groups__insert(pos->groups, after);
 		if (verbose >= 2)
 			map__fprintf(after, fp);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 39345c2ddfc22edcfde844e5eb95b72930f4eeda..d4f872f1750e68c9cd20bf8d52436a30cc5bd1dd 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -145,6 +145,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 	evsel->prev_raw_counts = NULL;
 }
 
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+	if (evsel->prev_raw_counts) {
+		evsel->prev_raw_counts->aggr.val = 0;
+		evsel->prev_raw_counts->aggr.ena = 0;
+		evsel->prev_raw_counts->aggr.run = 0;
+	}
+}
+
 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
 	int ncpus = perf_evsel__nr_cpus(evsel);
@@ -195,6 +204,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
 	}
 }
 
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__reset_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct perf_evsel *counter)
 {
 	if (counter->per_pkg_mask)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index c29bb94c48a4b6070a659e3674f4ae84694724f9..b8845aceac31a2e0bebb60acf731c0f205ece113 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -94,6 +94,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
 
 int perf_stat_process_counter(struct perf_stat_config *config,
 			      struct perf_evsel *counter);
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 7ff46be908f0bb278e1e5ceb544b15fa353b54a2..d426fec3b1d34d1f244afc808688923a5251eb91 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -139,7 +139,7 @@ static int ap_insert_action(char *argument, u32 to_be_done)
 
 	current_action++;
 	if (current_action > AP_MAX_ACTIONS) {
-		fprintf(stderr, "Too many table options (max %u)\n",
+		fprintf(stderr, "Too many table options (max %d)\n",
 			AP_MAX_ACTIONS);
 		return (-1);
 	}
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index b4c5d96e54c123dab18b2464949b356e50e4601f..7c2c8e74aa9a99d6c6dbb657772bc9c904a74012 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3593,7 +3593,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+	output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
 	outp = output_buffer;
 	if (outp == NULL)
 		err(-1, "calloc output buffer");
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
index 231bcd2c4eb59e9d3528be1e181650424fe13130..1e7ac6f3362ff26cfd1ea09931482e8a607ca867 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -71,8 +71,11 @@ test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
 echo "r ${PROBEFUNC} \$retval" > kprobe_events
 ! echo "p ${PROBEFUNC} \$retval" > kprobe_events
 
+# $comm was introduced in 4.8, older kernels reject it.
+if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
 : "Comm access"
 test_goodarg "\$comm"
+fi
 
 : "Indirect memory access"
 test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
index 90958aaaafb93dc826028fc1c27e230ca8898186..2737d6a595f49685fbced10804f2d27281523fe8 100644
--- a/tools/testing/selftests/net/reuseport_dualstack.c
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -128,7 +128,7 @@ static void test(int *rcv_fds, int count, int proto)
 {
 	struct epoll_event ev;
 	int epfd, i, test_fd;
-	uint16_t test_family;
+	int test_family;
 	socklen_t len;
 
 	epfd = epoll_create(1);
@@ -145,6 +145,7 @@ static void test(int *rcv_fds, int count, int proto)
 	send_from_v4(proto);
 
 	test_fd = receive_once(epfd, proto);
+	len = sizeof(test_family);
 	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
 		error(1, errno, "failed to read socket domain");
 	if (test_family != AF_INET)
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index 6ff7b601f854562090b348097963554113e3fb75..4bb905925b0e4807f8202a569f1363d103be9625 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
 	int size;
 	int fd;
 	int length;
-	char status;
+	char status[2] = { 0 };
 	int value = 0;
 
 	size = snprintf(status_attr_path, sizeof(status_attr_path),
@@ -61,15 +61,15 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
 		return -1;
 	}
 
-	length = read(fd, &status, 1);
+	length = read(fd, status, 1);
 	if (length < 0) {
 		err("error reading attribute %s", status_attr_path);
 		close(fd);
 		return -1;
 	}
 
-	value = atoi(&status);
-
+	value = atoi(status);
+
 	close(fd);
 	return value;
 }
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 571c1ce37d152f86c3690d9e2427f5eeca97cd2c..5c1efb869df240f4cb0b600307dbe7fbf40d8c39 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c72586a094edb5ee23d44e3bfa2ca0a88d197c18..c0dff5337a506b2b01dff8486cf0265aad294a23 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
 #include 
 #include 
 #include 
+#include <linux/kthread.h>
 
 #include 
 #include 
@@ -87,7 +88,7 @@ module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -130,10 +131,30 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+	/*
+	 * The metadata used by is_zone_device_page() to determine whether or
+	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
+	 * page_count() is zero to help detect bad usage of this helper.
+	 */
+	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+		return false;
+
+	return is_zone_device_page(pfn_to_page(pfn));
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
+	/*
+	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+	 * perspective they are "normal" pages, albeit with slightly different
+	 * usage rules.
+	 */
 	if (pfn_valid(pfn))
-		return PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn)) &&
+		       !kvm_is_zone_device_pfn(pfn);
 
 	return true;
 }
@@ -612,6 +633,23 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 	return 0;
 }
 
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int __weak kvm_arch_post_init_vm(struct kvm *kvm)
+{
+	return 0;
+}
+
+/*
+ * Called just after removing the VM from the vm_list, but before doing any
+ * other destruction.
+ */
+void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+}
+
 static struct kvm *kvm_create_vm(unsigned long type)
 {
 	int r, i;
@@ -659,22 +697,31 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 					GFP_KERNEL);
 		if (!kvm->buses[i])
-			goto out_err;
+			goto out_err_no_mmu_notifier;
 	}
 
 	r = kvm_init_mmu_notifier(kvm);
+	if (r)
+		goto out_err_no_mmu_notifier;
+
+	r = kvm_arch_post_init_vm(kvm);
 	if (r)
 		goto out_err;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	preempt_notifier_inc();
 
 	return kvm;
 
 out_err:
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+	if (kvm->mmu_notifier.ops)
+		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
+#endif
+out_err_no_mmu_notifier:
 	cleanup_srcu_struct(&kvm->irq_srcu);
 out_err_no_irq_srcu:
 	cleanup_srcu_struct(&kvm->srcu);
@@ -724,9 +771,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
 
 	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
+	kvm_arch_pre_destroy_vm(kvm);
+
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		if (kvm->buses[i])
@@ -1729,7 +1778,7 @@ static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 
 		if (!PageReserved(page))
@@ -1740,7 +1789,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
@@ -3752,13 +3801,13 @@ static int vm_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3772,13 +3821,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3987,3 +4036,86 @@ void kvm_exit(void)
 	kvm_vfio_ops_exit();
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
+
+struct kvm_vm_worker_thread_context {
+	struct kvm *kvm;
+	struct task_struct *parent;
+	struct completion init_done;
+	kvm_vm_thread_fn_t thread_fn;
+	uintptr_t data;
+	int err;
+};
+
+static int kvm_vm_worker_thread(void *context)
+{
+	/*
+	 * The init_context is allocated on the stack of the parent thread, so
+	 * we have to locally copy anything that is needed beyond initialization
+	 */
+	struct kvm_vm_worker_thread_context *init_context = context;
+	struct kvm *kvm = init_context->kvm;
+	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+	uintptr_t data = init_context->data;
+	int err;
+
+	err = kthread_park(current);
+	/* kthread_park(current) is never supposed to return an error */
+	WARN_ON(err != 0);
+	if (err)
+		goto init_complete;
+
+	err = cgroup_attach_task_all(init_context->parent, current);
+	if (err) {
+		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+			__func__, err);
+		goto init_complete;
+	}
+
+	set_user_nice(current, task_nice(init_context->parent));
+
+init_complete:
+	init_context->err = err;
+	complete(&init_context->init_done);
+	init_context = NULL;
+
+	if (err)
+		return err;
+
+	/* Wait to be woken up by the spawner before proceeding. */
+	kthread_parkme();
+
+	if (!kthread_should_stop())
+		err = thread_fn(kvm, data);
+
+	return err;
+}
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+				uintptr_t data, const char *name,
+				struct task_struct **thread_ptr)
+{
+	struct kvm_vm_worker_thread_context init_context = {};
+	struct task_struct *thread;
+
+	*thread_ptr = NULL;
+	init_context.kvm = kvm;
+	init_context.parent = current;
+	init_context.thread_fn = thread_fn;
+	init_context.data = data;
+	init_completion(&init_context.init_done);
+
+	thread = kthread_run(kvm_vm_worker_thread, &init_context,
+			     "%s-%d", name, task_pid_nr(current));
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	/* kthread_run is never supposed to return NULL */
+	WARN_ON(thread == NULL);
+
+	wait_for_completion(&init_context.init_done);
+
+	if (!init_context.err)
+		*thread_ptr = thread;
+
+	return init_context.err;
+}